[aGrUM] fine-tuning approximate inference (Hybrid still to be done)

parent e4769aac
@@ -40,69 +40,79 @@
 #define GIBBSKL_DEFAULT_BURNIN 2000
 #define GIBBSKL_DEFAULT_TIMEOUT 6000
+#define GIBBSKL_POURCENT_DRAWN_SAMPLE 10  // percent drawn
+#define GIBBSKL_DRAWN_AT_RANDOM false
 
 namespace gum {
-  template <typename GUM_SCALAR>
-  GibbsKL<GUM_SCALAR>::GibbsKL( const IBayesNet<GUM_SCALAR>& P,
-                                const IBayesNet<GUM_SCALAR>& Q )
-      : KL<GUM_SCALAR>( P, Q )
+  template < typename GUM_SCALAR >
+  GibbsKL< GUM_SCALAR >::GibbsKL(const IBayesNet< GUM_SCALAR >& P,
+                                 const IBayesNet< GUM_SCALAR >& Q)
+      : KL< GUM_SCALAR >(P, Q)
       , ApproximationScheme()
-      , GibbsOperator<GUM_SCALAR>( P ) {
-    GUM_CONSTRUCTOR( GibbsKL );
-    setEpsilon( GIBBSKL_DEFAULT_EPSILON );
-    setMinEpsilonRate( GIBBSKL_DEFAULT_MIN_EPSILON_RATE );
-    setMaxIter( GIBBSKL_DEFAULT_MAXITER );
-    setVerbosity( GIBBSKL_DEFAULT_VERBOSITY );
-    setBurnIn( GIBBSKL_DEFAULT_BURNIN );
-    setPeriodSize( GIBBSKL_DEFAULT_PERIOD_SIZE );
-    setMaxTime( GIBBSKL_DEFAULT_TIMEOUT );
+      , GibbsOperator< GUM_SCALAR >(
+            P,
+            nullptr,
+            1 + (P.size() * GIBBSKL_POURCENT_DRAWN_SAMPLE / 100),
+            GIBBSKL_DRAWN_AT_RANDOM) {
+    GUM_CONSTRUCTOR(GibbsKL);
+    setEpsilon(GIBBSKL_DEFAULT_EPSILON);
+    setMinEpsilonRate(GIBBSKL_DEFAULT_MIN_EPSILON_RATE);
+    setMaxIter(GIBBSKL_DEFAULT_MAXITER);
+    setVerbosity(GIBBSKL_DEFAULT_VERBOSITY);
+    setBurnIn(GIBBSKL_DEFAULT_BURNIN);
+    setPeriodSize(GIBBSKL_DEFAULT_PERIOD_SIZE);
+    setMaxTime(GIBBSKL_DEFAULT_TIMEOUT);
   }
 
-  template <typename GUM_SCALAR>
-  GibbsKL<GUM_SCALAR>::GibbsKL( const KL<GUM_SCALAR>& kl )
-      : KL<GUM_SCALAR>( kl )
+  template < typename GUM_SCALAR >
+  GibbsKL< GUM_SCALAR >::GibbsKL(const KL< GUM_SCALAR >& kl)
      : KL< GUM_SCALAR >(kl)
       , ApproximationScheme()
-      // Gibbs operator with 10% of nodes changes at random between each samples
-      , GibbsOperator<GUM_SCALAR>( kl.p(), 1 + ( kl.p().size() / 10 ), true ) {
-    GUM_CONSTRUCTOR( GibbsKL );
-    setEpsilon( GIBBSKL_DEFAULT_EPSILON );
-    setMinEpsilonRate( GIBBSKL_DEFAULT_MIN_EPSILON_RATE );
-    setMaxIter( GIBBSKL_DEFAULT_MAXITER );
-    setVerbosity( GIBBSKL_DEFAULT_VERBOSITY );
-    setBurnIn( GIBBSKL_DEFAULT_BURNIN );
-    setPeriodSize( GIBBSKL_DEFAULT_PERIOD_SIZE );
-    setMaxTime( GIBBSKL_DEFAULT_TIMEOUT );
+      , GibbsOperator< GUM_SCALAR >(
+            kl.p(),
+            nullptr,
+            1 + (kl.p().size() * GIBBSKL_POURCENT_DRAWN_SAMPLE / 100),
+            true) {
+    GUM_CONSTRUCTOR(GibbsKL);
+    setEpsilon(GIBBSKL_DEFAULT_EPSILON);
+    setMinEpsilonRate(GIBBSKL_DEFAULT_MIN_EPSILON_RATE);
+    setMaxIter(GIBBSKL_DEFAULT_MAXITER);
+    setVerbosity(GIBBSKL_DEFAULT_VERBOSITY);
+    setBurnIn(GIBBSKL_DEFAULT_BURNIN);
+    setPeriodSize(GIBBSKL_DEFAULT_PERIOD_SIZE);
+    setMaxTime(GIBBSKL_DEFAULT_TIMEOUT);
   }
 
-  template <typename GUM_SCALAR>
-  GibbsKL<GUM_SCALAR>::~GibbsKL() {
-    GUM_DESTRUCTOR( GibbsKL );
+  template < typename GUM_SCALAR >
+  GibbsKL< GUM_SCALAR >::~GibbsKL() {
+    GUM_DESTRUCTOR(GibbsKL);
   }
 
-  template <typename GUM_SCALAR>
-  void GibbsKL<GUM_SCALAR>::_computeKL() {
+  template < typename GUM_SCALAR >
+  void GibbsKL< GUM_SCALAR >::_computeKL() {
     gum::Instantiation Iq;
-    _q.completeInstantiation( Iq );
+    _q.completeInstantiation(Iq);
 
     gum::Instantiation I = this->monteCarloSample();
     initApproximationScheme();
 
     // map between particle() variables and _q variables (using name of vars)
-    HashTable<const DiscreteVariable*, const DiscreteVariable*> map;
-    for ( Idx ite = 0; ite < I.nbrDim(); ++ite ) {
-      map.insert( &I.variable( ite ),
-                  &_q.variableFromName( I.variable( ite ).name() ) );
+    HashTable< const DiscreteVariable*, const DiscreteVariable* > map;
+    for (Idx ite = 0; ite < I.nbrDim(); ++ite) {
+      map.insert(&I.variable(ite), &_q.variableFromName(I.variable(ite).name()));
     }
 
     // BURN IN
-    for ( Idx i = 0; i < burnIn(); i++ )
-      I = this->nextSample( I );
+    for (Idx i = 0; i < burnIn(); i++)
+      I = this->nextSample(I);
 
     // SAMPLING
     _klPQ = _klQP = _hellinger = (GUM_SCALAR)0.0;
@@ -116,52 +126,52 @@ namespace gum {
     do {
       this->disableMinEpsilonRate();
-      I = this->nextSample( I );
+      I = this->nextSample(I);
       updateApproximationScheme();
 
       //_p.synchroInstantiations( Ip,I);
-      Iq.setValsFrom( map, I );
+      Iq.setValsFrom(map, I);
 
-      pp = _p.jointProbability( I );
-      pq = _q.jointProbability( Iq );
+      pp = _p.jointProbability(I);
+      pq = _q.jointProbability(Iq);
 
-      if ( pp != (GUM_SCALAR)0.0 ) {
-        _hellinger += std::pow( std::sqrt( pp ) - std::sqrt( pq ), 2 ) / pp;
+      if (pp != (GUM_SCALAR)0.0) {
+        _hellinger += std::pow(std::sqrt(pp) - std::sqrt(pq), 2) / pp;
 
-        if ( pq != (GUM_SCALAR)0.0 ) {
-          _bhattacharya += std::sqrt( pq / pp );  // std::sqrt(pp*pq)/pp
+        if (pq != (GUM_SCALAR)0.0) {
+          _bhattacharya += std::sqrt(pq / pp);  // std::sqrt(pp*pq)/pp
           /// check_rate=true;
           this->enableMinEpsilonRate();  // replace check_rate=true;
 
           ratio = pq / pp;
-          delta = (GUM_SCALAR)log2( ratio );
+          delta = (GUM_SCALAR)log2(ratio);
           _klPQ += delta;
         } else {
           _errorPQ++;
         }
       }
 
-      if ( pq != (GUM_SCALAR)0.0 ) {
-        if ( pp != (GUM_SCALAR)0.0 ) {
+      if (pq != (GUM_SCALAR)0.0) {
+        if (pp != (GUM_SCALAR)0.0) {
           // if we are here, it is certain that delta and ratio have been
           // computed
          // further lines above. (for now #112-113)
-          _klQP += ( GUM_SCALAR )( -delta * ratio );
+          _klQP += (GUM_SCALAR)(-delta * ratio);
        } else {
          _errorQP++;
        }
      }
 
-      if ( this->isEnabledMinEpsilonRate() ) {  // replace check_rate
+      if (this->isEnabledMinEpsilonRate()) {  // replace check_rate
        // delta is used as a temporary variable
        delta = _klPQ / nbrIterations();
-        error = (GUM_SCALAR)std::abs( delta - oldPQ );
+        error = (GUM_SCALAR)std::abs(delta - oldPQ);
        oldPQ = delta;
      }
-    } while ( continueApproximationScheme( error ) );  //
+    } while (continueApproximationScheme(error));  //
 
-    _klPQ = -_klPQ / ( nbrIterations() );
-    _klQP = -_klQP / ( nbrIterations() );
-    _hellinger = std::sqrt( _hellinger / nbrIterations() );
-    _bhattacharya = -std::log( _bhattacharya );
+    _klPQ = -_klPQ / (nbrIterations());
+    _klQP = -_klQP / (nbrIterations());
+    _hellinger = std::sqrt(_hellinger / nbrIterations());
+    _bhattacharya = -std::log(_bhattacharya);
   }
 }
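For context, a minimal usage sketch of the class changed above (not part of the commit). Include paths and the `fillWith` values are assumptions based on aGrUM's usual layout of this period; the accessors `klPQ()` and `hellinger()` come from the `KL` base class and trigger `_computeKL()` on first call:

    // sketch: estimate divergences between two BNs over the same variables
    #include <iostream>
    #include <agrum/BN/BayesNet.h>
    #include <agrum/BN/algorithms/divergence/GibbsKL.h>
    #include <agrum/variables/labelizedVariable.h>

    int main() {
      gum::BayesNet< double > p;
      auto a = p.add(gum::LabelizedVariable("a", "", 2));
      auto b = p.add(gum::LabelizedVariable("b", "", 2));
      p.addArc(a, b);
      p.cpt(a).fillWith({0.3, 0.7});
      p.cpt(b).fillWith({0.8, 0.2, 0.4, 0.6});

      gum::BayesNet< double > q(p);     // same network: divergences should be ~0

      gum::GibbsKL< double > kl(p, q);  // picks up the new defaults set above
      std::cout << "KL(p||q) ~ " << kl.klPQ()
                << ", Hellinger ~ " << kl.hellinger() << std::endl;
      return 0;
    }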
@@ -32,7 +32,7 @@
 #define GIBBS_DEFAULT_MIN_EPSILON_RATE 1e-4
 #define GIBBS_DEFAULT_PERIOD_SIZE 500
 #define GIBBS_DEFAULT_VERBOSITY false
-#define GIBBS_DEFAULT_BURNIN 3000
+#define GIBBS_DEFAULT_BURNIN 10000
 
 // to ease parsing for IDE
 #include <agrum/BN/inference/tools/BayesNetInference.h>
......
@@ -26,6 +26,12 @@
 #include <agrum/BN/inference/GibbsSampling.h>
 
+#define GIBBS_DEFAULT_EPSILON 1e-4 * std::log(2)
+#define GIBBS_DEFAULT_MIN_EPSILON_RATE 1e-6 * std::log(2)
+#define GIBBS_DEFAULT_BURNIN 1000
+#define GIBBS_POURCENT_DRAWN_SAMPLE 10  // percent drawn
+#define GIBBS_DRAWN_AT_RANDOM false
+
 namespace gum {
@@ -33,8 +39,16 @@ namespace gum {
   template < typename GUM_SCALAR >
   GibbsSampling< GUM_SCALAR >::GibbsSampling(const IBayesNet< GUM_SCALAR >* BN)
       : ApproximateInference< GUM_SCALAR >(BN)
-      , GibbsOperator< GUM_SCALAR >(*BN) {
+      , GibbsOperator< GUM_SCALAR >(
+            *BN,
+            &this->hardEvidence(),
+            1 + (BN->size() * GIBBS_POURCENT_DRAWN_SAMPLE / 100),
+            GIBBS_DRAWN_AT_RANDOM) {
     GUM_CONSTRUCTOR(GibbsSampling);
+    this->setEpsilon(GIBBS_DEFAULT_EPSILON);
+    this->setMinEpsilonRate(GIBBS_DEFAULT_MIN_EPSILON_RATE);
+    this->setBurnIn(GIBBS_DEFAULT_BURNIN);
   }
 
   /// destructor
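A usage sketch of the sampler with these new defaults, continuing the `bn` built in the earlier sketch. Only hard evidence is accepted (see the `_onEvidenceAdded` guard later in this commit); the setter calls are assumptions about what a client would typically override:

    #include <agrum/BN/inference/GibbsSampling.h>

    gum::GibbsSampling< double > ie(&bn);
    ie.addEvidence(bn.idFromName("a"), 0);  // hard evidence: a = its first label
    ie.setEpsilon(1e-3);                    // override GIBBS_DEFAULT_EPSILON if desired
    ie.makeInference();
    std::cout << ie.posterior(bn.idFromName("b")) << std::endl;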
......
@@ -64,13 +64,20 @@ namespace gum {
     HybridApproxInference(const IBayesNet< GUM_SCALAR >* bn);
 
     /**
-    * destructor
-    */
+     * destructor
+     */
     virtual ~HybridApproxInference();
 
+    /// makes the inference by generating samples w.r.t the mother class' sampling
+    /// method after initializing estimators with loopy belief propagation
     virtual void _makeInference();
 
+    void setVirtualLBPSize(GUM_SCALAR vlbpsize) {
+      if (vlbpsize > 0) _virtualLBPSize = vlbpsize;
+    }
+
+    protected:
+    GUM_SCALAR _virtualLBPSize;
   };
 
   extern template class HybridApproxInference< float, WeightedSampling >;
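How the new knob would be used; a sketch assuming the `weightedSampling.h` include path and reusing `bn` from the earlier sketches (`WeightedSampling` is confirmed by the extern template line above):

    #include <agrum/BN/inference/hybridApproxInference.h>
    #include <agrum/BN/inference/weightedSampling.h>

    gum::HybridApproxInference< double, gum::WeightedSampling > hie(&bn);
    hie.setVirtualLBPSize(5000);  // trust the LBP prior as if it were 5000 samples
    hie.makeInference();
    std::cout << hie.posterior(bn.idFromName("b")) << std::endl;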
......
@@ -28,7 +28,8 @@
 #include <agrum/BN/inference/hybridApproxInference.h>
 
 #define DEFAULT_LBP_MAX_ITER 20
+#define DEFAULT_VIRTUAL_LBP_SIZE 1000
 
 namespace gum {
@@ -36,8 +37,8 @@ namespace gum {
   template < typename GUM_SCALAR, template < typename > class APPROX >
   HybridApproxInference< GUM_SCALAR, APPROX >::HybridApproxInference(
       const IBayesNet< GUM_SCALAR >* BN)
-      : APPROX< GUM_SCALAR >(BN) {
+      : APPROX< GUM_SCALAR >(BN)
+      , _virtualLBPSize(DEFAULT_VIRTUAL_LBP_SIZE) {
     GUM_CONSTRUCTOR(HybridApproxInference);
   }
@@ -53,27 +54,12 @@ namespace gum {
   void HybridApproxInference< GUM_SCALAR, APPROX >::_makeInference() {
     LoopyBeliefPropagation< GUM_SCALAR > lbp(&this->BN());
     lbp.setMaxIter(DEFAULT_LBP_MAX_ITER);
     lbp.makeInference();
 
-    if (!this->isSetEstimator)
-      this->_setEstimatorFromLBP(&lbp);
-
-    this->initApproximationScheme();
-
-    gum::Instantiation Ip;
-    float w = .0;  //
-
-    // Burn in
-    Ip = this->_burnIn();
-
-    do {
-      Ip = this->_draw(&w, Ip);
-      this->__estimator.update(Ip, w);
-      this->updateApproximationScheme();
-    } while (this->continueApproximationScheme(this->__estimator.confidence()));
+    if (!this->isSetEstimator) {
+      this->_setEstimatorFromLBP(&lbp, _virtualLBPSize);
+    }
+    this->isSetEstimator = false;
+    this->_loopApproxInference();
   }
 }
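Note: the burn-in/draw/update loop deleted here is not lost; it moves into ApproximateInference::_loopApproxInference (see the approximateInference_tpl.h hunk below, and the sketch after it). The hybrid class now only seeds the estimator from the LBP posteriors, weighted by _virtualLBPSize, and resets isSetEstimator for the next run.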
@@ -54,8 +54,8 @@ namespace gum {
    * and from ApproximationScheme.
    */
-  template <typename GUM_SCALAR>
-  class ApproximateInference : public MarginalTargetedInference<GUM_SCALAR>,
+  template < typename GUM_SCALAR >
+  class ApproximateInference : public MarginalTargetedInference< GUM_SCALAR >,
                                public ApproximationScheme {
 
     public:
@@ -69,7 +69,7 @@
      * @warning note that, by aGrUM's rule, the BN is not copied but only
      * referenced by the inference algorithm. */
-    ApproximateInference( const IBayesNet<GUM_SCALAR>* bn );
+    ApproximateInference(const IBayesNet< GUM_SCALAR >* bn);
 
     /// destructor
     virtual ~ApproximateInference();
@@ -81,7 +81,7 @@
     // ############################################################################
     /// @{
 
     /// get the BayesNet which is used to really perform the sampling
-    const IBayesNet<GUM_SCALAR>& samplingBN();
+    const IBayesNet< GUM_SCALAR >& samplingBN();
 
     /// Computes and returns the posterior of a node.
     /**
@@ -95,7 +95,7 @@
      * @throw UndefinedElement if node is not in the set of targets.
      * @throw NotFound if node is not in the BN.
      */
-    virtual const Potential<GUM_SCALAR>& _posterior( const NodeId id );
+    virtual const Potential< GUM_SCALAR >& _posterior(const NodeId id);
 
     /// Computes and returns the posterior of a node referred by its name.
     /**
@@ -111,7 +111,7 @@
      * targets.
      * @throw NotFound if node corresponding to name is not in the BN.
      */
-    virtual const Potential<GUM_SCALAR>& _posterior( const std::string& name );
+    virtual const Potential< GUM_SCALAR >& _posterior(const std::string& name);
 
     /// @}
@@ -137,7 +137,8 @@
     /// Initializes the estimators object linked to the simulation
     /**
      * Initializes the estimator object by creating a hashtable between non
-     * evidence nodes and a 0-filled potential which will approximate the node's posterior
+     * evidence nodes and a 0-filled potential which will approximate the node's
+     * posterior
      *
      */
     virtual void _setEstimatorFromBN();
@@ -145,18 +146,20 @@
     /// Initializes the estimators object linked to the simulation
     /**
      * @param lbp a LoopyBeliefPropagation object
+     * @param virtualLBPSize the size of the equivalent sampling by LBP
      *
      * Initializes the estimator object by creating a hashtable between
      * non evidence nodes and the current approximation of the node's posterior
      * obtained by running LoopyBeliefPropagation algorithm
      *
      */
-    virtual void _setEstimatorFromLBP( LoopyBeliefPropagation<GUM_SCALAR>* lbp );
+    virtual void _setEstimatorFromLBP(LoopyBeliefPropagation< GUM_SCALAR >* lbp,
+                                      GUM_SCALAR virtualLBPSize);
 
     ///@}
 
     protected:
     /// Estimator object designed to approximate target posteriors
-    Estimator<GUM_SCALAR> __estimator;
+    Estimator< GUM_SCALAR > __estimator;
 
     /// whether the Estimator object has been initialized
     bool isSetEstimator = false;
@@ -171,15 +174,13 @@
     /**
      * @param w the weight of sample being generated
      * @param prev the previous sample generated
-     * @param bn the bayesian network containing the evidence
-     * @param hardEvNodes hard evidence nodes
-     * @param hardEv hard evidences values
      *
      */
-    virtual Instantiation _draw( float* w, Instantiation prev ) = 0;
+    virtual Instantiation _draw(float* w, Instantiation prev) = 0;
 
     /// makes the inference by generating samples
     virtual void _makeInference();
+    void _loopApproxInference();
 
     /// adds a node to current instantiation
     /**
@@ -189,8 +190,7 @@
      * generates random value based on the BN's CPTs and adds the node to the
      * Instantiation with that value
      */
-    virtual void
-    _addVarSample( NodeId nod, Instantiation* I);
+    virtual void _addVarSample(NodeId nod, Instantiation* I);
 
     /// fired when the Bayesian network is contextualized
@@ -201,48 +201,46 @@
      * @param hardEv hard evidences values
      *
      */
-    virtual void _onContextualize( BayesNetFragment<GUM_SCALAR>* bn ){};
+    virtual void _onContextualize(BayesNetFragment< GUM_SCALAR >* bn){};
 
-    virtual void _onEvidenceAdded( const NodeId id, bool isHardEvidence ) {
-      if ( !isHardEvidence ) {
-        GUM_ERROR( FatalError,
-                   "Approximated inference only accept hard evidence" );
+    virtual void _onEvidenceAdded(const NodeId id, bool isHardEvidence) {
+      if (!isHardEvidence) {
+        GUM_ERROR(FatalError, "Approximate inference accepts only hard evidence");
       }
     };
 
-    virtual void _onEvidenceErased( const NodeId id, bool isHardEvidence ){};
+    virtual void _onEvidenceErased(const NodeId id, bool isHardEvidence){};
 
-    virtual void _onAllEvidenceErased( bool contains_hard_evidence ){};
+    virtual void _onAllEvidenceErased(bool contains_hard_evidence){};
 
-    virtual void _onEvidenceChanged( const NodeId id, bool hasChangedSoftHard ) {
-      if ( hasChangedSoftHard ) {
-        GUM_ERROR( FatalError,
-                   "Approximated inference only accept hard evidence" );
+    virtual void _onEvidenceChanged(const NodeId id, bool hasChangedSoftHard) {
+      if (hasChangedSoftHard) {
+        GUM_ERROR(FatalError, "Approximate inference accepts only hard evidence");
      }
    };
 
-    virtual void _onBayesNetChanged( const IBayesNet<GUM_SCALAR>* bn ){};
+    virtual void _onBayesNetChanged(const IBayesNet< GUM_SCALAR >* bn){};
 
     virtual void _updateOutdatedBNStructure(){};
     virtual void _updateOutdatedBNPotentials(){};
 
-    virtual void _onMarginalTargetAdded( const NodeId id ){};
+    virtual void _onMarginalTargetAdded(const NodeId id){};
 
-    virtual void _onMarginalTargetErased( const NodeId id ){};
+    virtual void _onMarginalTargetErased(const NodeId id){};
 
     virtual void _onAllMarginalTargetsAdded(){};
     virtual void _onAllMarginalTargetsErased(){};
 
     private:
-    BayesNetFragment<GUM_SCALAR>* __samplingBN;
+    BayesNetFragment< GUM_SCALAR >* __samplingBN;
   };
 
-  extern template class ApproximateInference<float>;
-  extern template class ApproximateInference<double>;
+  extern template class ApproximateInference< float >;
+  extern template class ApproximateInference< double >;
 }
 
 #include <agrum/BN/inference/tools/approximateInference_tpl.h>
......
@@ -32,12 +32,12 @@
 #define DEFAULT_MAXITER 10000000
-#define DEFAULT_MIN_EPSILON_RATE 1e-2
-#define DEFAULT_PERIOD_SIZE 500
+#define DEFAULT_PERIOD_SIZE 100
 #define DEFAULT_VERBOSITY false
-#define DEFAULT_BURNIN 2000
+#define DEFAULT_BURNIN 0
 #define DEFAULT_TIMEOUT 6000
 #define DEFAULT_EPSILON 1e-2
+#define DEFAULT_MIN_EPSILON_RATE 1e-5
 
 namespace gum {
@@ -89,9 +89,9 @@ namespace gum {
   template < typename GUM_SCALAR >
   void ApproximateInference< GUM_SCALAR >::_setEstimatorFromLBP(
-      LoopyBeliefPropagation< GUM_SCALAR >* lbp) {
+      LoopyBeliefPropagation< GUM_SCALAR >* lbp, GUM_SCALAR virtualLBPSize) {
 
-    __estimator.setFromLBP(lbp, this->hardEvidenceNodes());
+    __estimator.setFromLBP(lbp, this->hardEvidenceNodes(), virtualLBPSize);
     this->isSetEstimator = true;
   }
@@ -158,13 +158,17 @@ namespace gum {
   template < typename GUM_SCALAR >
   void ApproximateInference< GUM_SCALAR >::_makeInference() {
+    if (!isSetEstimator) this->_setEstimatorFromBN();
+    _loopApproxInference();
+  }
+
+  template < typename GUM_SCALAR >
+  void ApproximateInference< GUM_SCALAR >::_loopApproxInference() {
     //@todo This should be in __prepareInference
     if (!isContextualized) {
       this->contextualize();
     }
 
-    if (!isSetEstimator) this->_setEstimatorFromBN();
 
     initApproximationScheme();
     gum::Instantiation Ip;
     float w = .0;  //
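For reference, the rest of the loop that _loopApproxInference now owns has the same shape as the code removed from hybridApproxInference_tpl.h above; a sketch assembled from those removed lines (not the verbatim body of this file):

    // shared sampling loop: burn in, then draw weighted samples until the
    // estimator's confidence interval satisfies the approximation scheme
    Ip = this->_burnIn();  // discard the first samples (DEFAULT_BURNIN is now 0 here)
    do {
      Ip = this->_draw(&w, Ip);     // one (possibly weighted) sample
      __estimator.update(Ip, w);    // accumulate it in the posterior estimator
      updateApproximationScheme();
    } while (continueApproximationScheme(__estimator.confidence()));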
......
@@ -71,7 +71,8 @@ namespace gum {
      * sets the estimator object with posteriors obtained by LoopyBeliefPropagation
      */
     void setFromLBP( LoopyBeliefPropagation<GUM_SCALAR>* lbp,
-                     const NodeSet& hardEvidence );
+                     const NodeSet& hardEvidence,
+                     GUM_SCALAR virtualLBPSize );
     /** @} */
 
     /// computes the maximum length of confidence interval for each possible value
......
@@ -93,23 +93,22 @@ namespace gum {
   template < typename GUM_SCALAR >
   void
   Estimator< GUM_SCALAR >::setFromLBP(LoopyBeliefPropagation< GUM_SCALAR >* lbp,
-                                      const NodeSet& hardEvidence) {
+                                      const NodeSet& hardEvidence,
+                                      GUM_SCALAR virtualLBPSize) {
 
-    for (gum::NodeGraphPartIterator iter = lbp->BN().nodes().begin();
-         iter != lbp->BN().nodes().end();
-         ++iter) {
+    for (const auto& node : lbp->BN().nodes()) {
 
-      if (!hardEvidence.contains(*iter)) {
+      if (!hardEvidence.contains(node)) {
 
         std::vector< GUM_SCALAR > v;
-        auto p = lbp->posterior(*iter);
+        auto p = lbp->posterior(node);
         gum::Instantiation inst(p);
 
         for (inst.setFirst(); !inst.end(); ++inst) {
-          v.push_back(p[inst] * lbp->nbrIterations());
+          v.push_back(p[inst] * virtualLBPSize);
        }
 
-        _estimator.insert(lbp->BN().variable(*iter).name(), v);
+        _estimator.insert(lbp->BN().variable(node).name(), v);
      }
    }
  }
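Design note on this change: previously each LBP posterior was injected with weight lbp->nbrIterations(), tying the estimator's initial confidence to however many message-passing iterations LBP happened to run. virtualLBPSize decouples the two: each value of a node starts with count posterior × virtualLBPSize, i.e. the estimator behaves as if it had already drawn virtualLBPSize samples from the LBP approximation. With the default of 1000 and a posterior value of 0.25, that value starts with a virtual count of 250.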
......
@@ -47,13 +47,10 @@ namespace gum {
     public:
     /**
-     * constructors
+     * constructor
      */
-    GibbsOperator(const IBayesNet< GUM_SCALAR >& BN,
-                  Size nbr = 1,
-                  bool atRandom = false);
-
     GibbsOperator(const IBayesNet< GUM_SCALAR >& BN,
-                  const NodeProperty< Idx >& hardEv,
+                  const NodeProperty< Idx >* hardEv,
                   Size nbr = 1,
                   bool atRandom = false);
@@ -62,7 +59,16 @@ namespace gum {
      */
     virtual ~GibbsOperator();
 
-    // draws a Monte Carlo sample
+    /** Getters and setters */
+    Size nbrDrawnVar() const { return _nbr; }
+    void setNbrDrawnVar(Size nbr) { _nbr = nbr; }
+
+    bool isDrawnAtRandom() const { return _atRandom; }
+    void setDrawnAtRandom(bool atRandom) { _atRandom = atRandom; }
+
+    /// draws a Monte Carlo sample
     Instantiation monteCarloSample();
 
     /// draws next sample of Gibbs sampling
@@ -76,6 +82,8 @@
     Sequence< NodeId > _samplingNodes;
 
     Size _nbr;
+
+    protected:
+    bool _atRandom;
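For scale: with GIBBS_POURCENT_DRAWN_SAMPLE at 10, a 100-node network resamples 1 + 100 * 10 / 100 = 11 variables per transition, and the leading "1 +" guarantees at least one variable is drawn on very small networks. The setNbrDrawnVar and setDrawnAtRandom accessors added above let client code override both choices after construction.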
......
@@ -29,27 +29,15 @@
 #include <agrum/core/utils_random.h>
 
 namespace gum {
 
-  template < typename GUM_SCALAR >
-  GibbsOperator< GUM_SCALAR >::GibbsOperator(const IBayesNet< GUM_SCALAR >& BN,
-                                             Size nbr,
-                                             bool atRandom)
-      : _counting(0)
-      , _sampling_bn(BN)
-      , _hardEv(nullptr)
-      , _nbr(nbr)
-      , _atRandom(atRandom) {
-    __updateSamplingNodes();
-    GUM_CONSTRUCTOR(GibbsOperator);
-  }
-
   template < typename GUM_SCALAR >
   GibbsOperator< GUM_SCALAR >::GibbsOperator(const IBayesNet< GUM_SCALAR >& BN,
-                                             const NodeProperty< Idx >& hardEv,
+                                             const NodeProperty< Idx >* hardEv,
                                              Size nbr,
                                              bool atRandom)
       : _counting(0)
       , _sampling_bn(BN)
-      , _hardEv(&hardEv)
+      , _hardEv(hardEv)
       , _nbr(nbr)
       , _atRandom(atRandom) {
     __updateSamplingNodes();
......
@@ -116,8 +116,8 @@ namespace gum {
     return _enabled_max_iter;
   }
 
-  // stopping criterion on timeout If the criterion was disabled it will be
-  // enabled
+  // stopping criterion on timeout (in seconds)
+  // If the criterion was disabled it will be enabled
   INLINE void ApproximationScheme::setMaxTime( double timeout ) {
     if ( timeout <= 0. ) {
       GUM_ERROR( OutOfLowerBound, "timeout should be >0." );
......
@@ -21,26 +21,7 @@ bool __compareInference(const gum::BayesNet< GUM_SCALAR >& bn,
   for (const auto& node : bn.nodes()) {
     if (!inf.BN().dag().exists(node)) continue;
 
-    GUM_SCALAR e;
-    try {
-      e = lazy.posterior(node).KL(inf.posterior(node));
-    } catch (gum::FatalError) {
-      // 0 in a proba
-      try {
-        // we cannot use KL, we use quadratic error
-        e = (inf.posterior(node) - lazy.posterior(node)).sq().sum();
-      } catch (gum::FatalError) {
-        e = std::numeric_limits< GUM_SCALAR >::infinity();
-      }
-    } catch (gum::NotFound e) {
-      continue;
-    }
+    GUM_SCALAR e = (inf.posterior(node) - lazy.posterior(node)).abs().max();
 
     if (e > err) {
       err = e;
@@ -50,10 +31,10 @@ bool __compareInference(const gum::BayesNet< GUM_SCALAR >& bn,
       argstr += " inf : " + inf.posterior(node).toString() + " \n";
     }
   }
 
-  /*if (err > errmax) {
+  if (err > errmax) {
     GUM_TRACE(argstr);
     GUM_TRACE(inf.messageApproximationScheme());
-  }*/
+  }