[pyAgrum] updating inference API

parent 9fc8b78f
......@@ -39,46 +39,45 @@
namespace gum {
/**
* GibbsKL2 computes the KL divergence betweens 2 BNs using an approximation
*pattern
*: GIBBS sampling.
*
* KL.process() computes KL(P||Q) using klPQ() and KL(Q||P) using klQP(). The
*computations are made once. The second is for free :)
* GibbsKL allows as well to compute in the same time the Hellinger distance
*(\f$
*\sqrt{\sum_i (\sqrt{p_i}-\sqrt{q_i})^2}\f$) (Kokolakis and Nanopoulos, 2001)
* and Bhattacharya distance (Kaylath,T. 1967)
*
* It may happen that P*ln(P/Q) is not computable (Q=0 and P!=0). In such a
*case, KL
*keeps working but trace this error (errorPQ() and errorQP()). In those cases,
*Hellinger distance approximation is under-evaluated.
*
* @warning : convergence and stop criteria are designed w.r.t the main
*computation
*: KL(P||Q). The 3 others have no guarantee.
*
* snippets :
* @code
* gum::KL base_kl(net1,net2);
* if (base_kl.difficulty()!=KL::HEAVY) {
* gum::BruteForceKL kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* } else {
* gum::GibbsKL2 kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* }
* @endcode
*/
template <typename GUM_SCALAR>
class GibbsKL : public KL<GUM_SCALAR>,
/**
* GibbsKL2 computes the KL divergence betweens 2 BNs using an approximation
*pattern
*: GIBBS sampling.
*
* KL.process() computes KL(P||Q) using klPQ() and KL(Q||P) using klQP(). The
*computations are made once. The second is for free :)
* GibbsKL allows as well to compute in the same time the Hellinger distance
*(\f$
*\sqrt{\sum_i (\sqrt{p_i}-\sqrt{q_i})^2}\f$) (Kokolakis and Nanopoulos, 2001)
* and Bhattacharya distance (Kaylath,T. 1967)
*
* It may happen that P*ln(P/Q) is not computable (Q=0 and P!=0). In such a
*case, KL
*keeps working but trace this error (errorPQ() and errorQP()). In those cases,
*Hellinger distance approximation is under-evaluated.
*
* @warning : convergence and stop criteria are designed w.r.t the main
*computation
*: KL(P||Q). The 3 others have no guarantee.
*
* snippets :
* @code
* gum::KL base_kl(net1,net2);
* if (base_kl.difficulty()!=KL::HEAVY) {
* gum::BruteForceKL kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* } else {
* gum::GibbsKL2 kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* }
* @endcode
*/
template < typename GUM_SCALAR >
class GibbsKL : public KL< GUM_SCALAR >,
public ApproximationScheme,
public GibbsOperator<GUM_SCALAR> {
public GibbsOperator< GUM_SCALAR > {
public:
/* no default constructor */
/** constructor must give 2 BNs
......@@ -88,34 +87,46 @@ namespace gum {
*/
GibbsKL( const IBayesNet<GUM_SCALAR>& P, const IBayesNet<GUM_SCALAR>& Q );
GibbsKL(const IBayesNet< GUM_SCALAR >& P, const IBayesNet< GUM_SCALAR >& Q);
/** copy constructor
*/
GibbsKL( const KL<GUM_SCALAR>& kl );
GibbsKL(const KL< GUM_SCALAR >& kl);
/** destructor */
~GibbsKL();
/**
* @brief Number of burn in for one iteration.
* @param b The number of burn in.
* @throw OutOfLowerBound Raised if b < 1.
*/
void setBurnIn(Size b) { this->_burn_in = b; };
/**
* @brief Returns the number of burn in.
* @return Returns the number of burn in.
*/
Size burnIn(void) const { return this->_burn_in; };
protected:
void _computeKL( void );
void _computeKL(void);
using KL<GUM_SCALAR>::_p;
using KL<GUM_SCALAR>::_q;
using KL<GUM_SCALAR>::_hellinger;
using KL<GUM_SCALAR>::_bhattacharya;
using KL< GUM_SCALAR >::_p;
using KL< GUM_SCALAR >::_q;
using KL< GUM_SCALAR >::_hellinger;
using KL< GUM_SCALAR >::_bhattacharya;
using KL<GUM_SCALAR>::_klPQ;
using KL<GUM_SCALAR>::_klQP;
using KL< GUM_SCALAR >::_klPQ;
using KL< GUM_SCALAR >::_klQP;
using KL<GUM_SCALAR>::_errorPQ;
using KL<GUM_SCALAR>::_errorQP;
using KL< GUM_SCALAR >::_errorPQ;
using KL< GUM_SCALAR >::_errorQP;
};
extern template class GibbsKL<float>;
extern template class GibbsKL<double>;
extern template class GibbsKL< float >;
extern template class GibbsKL< double >;
} // namespace gum
......
/***************************************************************************
* Copyright (C) 2005 by Pierre-Henri WUILLEMIN et Christophe GONZALES *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief KL divergence between BNs with GIBBS implementation
*
* @author Pierre-Henri WUILLEMIN
*/
#include <agrum/BN/algorithms/divergence/GibbsKLold.h>
#include <cmath>
template class gum::GibbsKLold<float>;
template class gum::GibbsKLold<double>;
/***************************************************************************
* Copyright (C) 2005 by Christophe GONZALES and Pierre-Henri WUILLEMIN *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief algorithm for approximated computation KL divergence between BNs using
*GIBBS
*sampling
*
* @author Pierre-Henri WUILLEMIN
*
*/
#ifndef GUM_GIBBS_KL_H
#define GUM_GIBBS_KL_H
#include <agrum/BN/algorithms/divergence/KL.h>
#include <agrum/BN/samplers/GibbsSampler.h>
#include <agrum/core/approximations/approximationScheme.h>
#include <agrum/core/signal/signaler.h>
namespace gum {
/**
 * GibbsKL computes the KL divergence between 2 BNs using an approximation
 *pattern
 *: GIBBS sampling.
*
 * KL.process() computes KL(P||Q) using klPQ() and KL(Q||P) using klQP(). The
 *computations are made once. The second is for free :)
* GibbsKL allows as well to compute in the same time the Hellinger distance
*(\f$
*\sqrt{\sum_i (\sqrt{p_i}-\sqrt{q_i})^2}\f$) (Kokolakis and Nanopoulos, 2001)
 * and Bhattacharya distance (Kailath, T., 1967)
*
 * It may happen that P*ln(P/Q) is not computable (Q=0 and P!=0). In such a
 *case, KL
 *keeps working but traces this error (errorPQ() and errorQP()). In those cases,
 *the Hellinger distance approximation is under-evaluated.
*
* @warning : convergence and stop criteria are designed w.r.t the main
*computation
*: KL(P||Q). The 3 others have no guarantee.
*
* snippets :
* @code
* gum::KL base_kl(net1,net2);
* if (base_kl.difficulty()!=KL::HEAVY) {
* gum::BruteForceKL kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* } else {
* gum::GibbsKL kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* }
* @endcode
*/
/**
 * Approximated KL divergence between two BNs, estimated by Gibbs sampling.
 *
 * Combines the KL accumulator API, the ApproximationScheme stopping
 * criteria and the Gibbs particle machinery; _computeKL() fills in the
 * protected accumulators inherited from KL in a single sampling run.
 */
template < typename GUM_SCALAR >
class GibbsKLold : public KL< GUM_SCALAR >,
                   public ApproximationScheme,
                   public samplers::GibbsSampler< GUM_SCALAR > {
  public:
  // no default constructor: both networks are required

  /** constructor must give 2 BNs
   * @throw gum::OperationNotAllowed if the 2 BNs have not the same domainSize
   * or
   * compatible node sets.
   */
  GibbsKLold( const IBayesNet<GUM_SCALAR>& P, const IBayesNet<GUM_SCALAR>& Q );

  /** copy constructor
   */
  GibbsKLold( const KL<GUM_SCALAR>& kl );

  /** destructor */
  ~GibbsKLold();

  // Gibbs particle machinery re-exported from the sampler base class.
  using samplers::GibbsSampler<GUM_SCALAR>::particle;
  using samplers::GibbsSampler<GUM_SCALAR>::initParticle;
  using samplers::GibbsSampler<GUM_SCALAR>::nextParticle;
  using samplers::GibbsSampler<GUM_SCALAR>::bn;

  protected:
  /// estimates every divergence/distance in one sampling run
  void _computeKL( void );

  // networks and result accumulators inherited from KL, filled by _computeKL()
  using KL<GUM_SCALAR>::_p;
  using KL<GUM_SCALAR>::_q;
  using KL<GUM_SCALAR>::_klPQ;
  using KL<GUM_SCALAR>::_klQP;
  using KL<GUM_SCALAR>::_hellinger;
  using KL<GUM_SCALAR>::_bhattacharya;
  using KL<GUM_SCALAR>::_errorPQ;
  using KL<GUM_SCALAR>::_errorQP;
};
extern template class GibbsKLold<float>;
extern template class GibbsKLold<double>;
} // namespace gum
#include <agrum/BN/algorithms/divergence/GibbsKLold_tpl.h>
#endif // GUM_GIBBS_KL_H
/***************************************************************************
* Copyright (C) 2005 by Pierre-Henri WUILLEMIN et Christophe GONZALES *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief KL divergence between BNs -- implementation using Gibbs sampling
*
* @author Pierre-Henri WUILLEMIN
*/
#include <cmath>
#include <agrum/BN/IBayesNet.h>
#include <agrum/core/hashTable.h>
#include <agrum/BN/algorithms/divergence/GibbsKLold.h>
#include <agrum/BN/samplers/GibbsSampler.h>
#include <agrum/core/approximations/approximationScheme.h>
#define KL_DEFAULT_MAXITER 10000000
#define KL_DEFAULT_EPSILON 1e-10
#define KL_DEFAULT_MIN_EPSILON_RATE 1e-10
#define KL_DEFAULT_PERIOD_SIZE 500
#define KL_DEFAULT_VERBOSITY false
#define KL_DEFAULT_BURNIN 3000
namespace gum {
/// Builds the Gibbs-based KL estimator for P || Q.
/// The Gibbs sampler draws its particles from P; the approximation scheme
/// is initialized with the KL_DEFAULT_* settings.
template < typename GUM_SCALAR >
GibbsKLold< GUM_SCALAR >::GibbsKLold( const IBayesNet< GUM_SCALAR >& P,
                                      const IBayesNet< GUM_SCALAR >& Q )
    : KL< GUM_SCALAR >( P, Q )
    , ApproximationScheme()
    , samplers::GibbsSampler< GUM_SCALAR >( P ) {
  GUM_CONSTRUCTOR( GibbsKLold );
  // default stopping criteria and sampling parameters
  setMaxIter( KL_DEFAULT_MAXITER );
  setEpsilon( KL_DEFAULT_EPSILON );
  setMinEpsilonRate( KL_DEFAULT_MIN_EPSILON_RATE );
  setPeriodSize( KL_DEFAULT_PERIOD_SIZE );
  setBurnIn( KL_DEFAULT_BURNIN );
  setVerbosity( KL_DEFAULT_VERBOSITY );
}
/// Builds the Gibbs-based estimator from an already existing KL object
/// (sharing its two networks); the Gibbs sampler draws from kl.p() and the
/// approximation scheme gets the KL_DEFAULT_* settings.
template < typename GUM_SCALAR >
GibbsKLold< GUM_SCALAR >::GibbsKLold( const KL< GUM_SCALAR >& kl )
    : KL< GUM_SCALAR >( kl )
    , ApproximationScheme()
    , samplers::GibbsSampler< GUM_SCALAR >( kl.p() ) {
  GUM_CONSTRUCTOR( GibbsKLold );
  // default stopping criteria and sampling parameters
  setMaxIter( KL_DEFAULT_MAXITER );
  setEpsilon( KL_DEFAULT_EPSILON );
  setMinEpsilonRate( KL_DEFAULT_MIN_EPSILON_RATE );
  setPeriodSize( KL_DEFAULT_PERIOD_SIZE );
  setBurnIn( KL_DEFAULT_BURNIN );
  setVerbosity( KL_DEFAULT_VERBOSITY );
}
/// Destructor: nothing to release beyond what the bases own (RAII);
/// only signals destruction to the aGrUM debug bookkeeping.
template < typename GUM_SCALAR >
GibbsKLold< GUM_SCALAR >::~GibbsKLold() {
  GUM_DESTRUCTOR( GibbsKLold );
}
// Single sampling run that estimates, at the same time, KL(P||Q), KL(Q||P),
// the Hellinger distance and the Bhattacharya distance between _p and _q.
// Particles are drawn from P (the sampler was constructed on P); for each
// sample, pp = P(sample) and pq = Q(sample) feed all four accumulators.
// NOTE(review): convergence is monitored only on the KL(P||Q) estimate
// (see the @warning in the class documentation).
template <typename GUM_SCALAR>
void GibbsKLold<GUM_SCALAR>::_computeKL() {
gum::Instantiation Iq;
_q.completeInstantiation( Iq );
// reset the Gibbs particle and the approximation-scheme counters
initParticle();
initApproximationScheme();
// map between particle() variables and _q variables (using name of vars)
HashTable<const DiscreteVariable*, const DiscreteVariable*> map;
for ( Idx ite = 0; ite < particle().nbrDim(); ++ite ) {
map.insert( &particle().variable( ite ),
&_q.variableFromName( particle().variable( ite ).name() ) );
}
// BURN IN
for ( Idx i = 0; i < burnIn(); i++ )
nextParticle();
// SAMPLING
_klPQ = _klQP = _hellinger = (GUM_SCALAR)0.0;
_errorPQ = _errorQP = 0;
/// bool check_rate;
GUM_SCALAR delta, ratio, error;
delta = ratio = error = (GUM_SCALAR)-1;
GUM_SCALAR oldPQ = 0.0;
GUM_SCALAR pp, pq;
do {
/// check_rate=false;
this->disableMinEpsilonRate(); // replace check_rate = false
nextParticle();
updateApproximationScheme();
// copy the current particle's values into Iq (variables matched by name)
//_p.synchroInstantiations( Ip,particle() );
Iq.setValsFrom( map, particle() );
pp = _p.jointProbability( particle() );
pq = _q.jointProbability( Iq );
if ( pp != (GUM_SCALAR)0.0 ) {
_hellinger += std::pow( std::sqrt( pp ) - std::sqrt( pq ), 2 ) / pp;
if ( pq != (GUM_SCALAR)0.0 ) {
_bhattacharya += std::sqrt( pq / pp ); // std::sqrt(pp*pq)/pp
/// check_rate=true;
this->enableMinEpsilonRate(); // replace check_rate=true;
ratio = pq / pp;
delta = (GUM_SCALAR)log2( ratio );
_klPQ += delta;
} else {
// Q=0 while P!=0: P*ln(P/Q) is not computable; count the error and skip
_errorPQ++;
}
}
if ( pq != (GUM_SCALAR)0.0 ) {
if ( pp != (GUM_SCALAR)0.0 ) {
// if we are here, it is certain that delta and ratio have been
// computed
// further lines above. (for now #112-113)
_klQP += ( GUM_SCALAR )( -delta * ratio );
} else {
// P=0 while Q!=0: symmetric non-computable case for KL(Q||P)
_errorQP++;
}
}
if ( this->isEnabledMinEpsilonRate() /* replace check_rate */ ) {
// delta is used as a temporary variable
delta = _klPQ / nbrIterations();
error = (GUM_SCALAR)std::abs( delta - oldPQ );
oldPQ = delta;
}
} while ( continueApproximationScheme( error /*,check_rate*/ ) );
// final normalization of the Monte-Carlo sums over the iteration count
_klPQ = -_klPQ / ( nbrIterations() );
_klQP = -_klQP / ( nbrIterations() );
_hellinger = std::sqrt( _hellinger / nbrIterations() );
_bhattacharya = -std::log( _bhattacharya );
}
} // namespace gum
// kate: indent-mode cstyle; indent-width 2; replace-tabs on;
......@@ -28,8 +28,8 @@
#ifndef GUM_GIBBS_SAMPLING_H
#define GUM_GIBBS_SAMPLING_H
#include <agrum/BN/inference/tools/samplingInference.h>
#include <agrum/BN/inference/tools/gibbsOperator.h>
#include <agrum/BN/inference/tools/samplingInference.h>
namespace gum {
......@@ -48,21 +48,34 @@ namespace gum {
*
*/
template <typename GUM_SCALAR>
class GibbsSampling : public SamplingInference<GUM_SCALAR>,
public GibbsOperator<GUM_SCALAR> {
template < typename GUM_SCALAR >
class GibbsSampling : public SamplingInference< GUM_SCALAR >,
public GibbsOperator< GUM_SCALAR > {
public:
/**
* Default constructor
*/
GibbsSampling( const IBayesNet<GUM_SCALAR>* BN );
GibbsSampling(const IBayesNet< GUM_SCALAR >* bn);
/**
* Destructor
*/
virtual ~GibbsSampling();
/**
* @brief Number of burn in for one iteration.
* @param b The number of burn in.
* @throw OutOfLowerBound Raised if b < 1.
*/
void setBurnIn(Size b) { this->_burn_in = b; };
/**
* @brief Returns the number of burn in.
* @return Returns the number of burn in.
*/
Size burnIn(void) const { return this->_burn_in; };
protected:
/// draws a defined number of samples without updating the estimators
virtual Instantiation _burnIn();
......@@ -81,7 +94,7 @@ namespace gum {
* sample, given the instantiation of all other nodes. It requires computing of
* P( x \given instantiation_markovblanket(x)).
*/
virtual Instantiation _draw( float* w, Instantiation prev );
virtual Instantiation _draw(float* w, Instantiation prev);
/// draws a Monte Carlo sample
/**
......@@ -97,8 +110,8 @@ namespace gum {
virtual Instantiation _monteCarloSample();
};
extern template class GibbsSampling<float>;
extern template class GibbsSampling<double>;
extern template class GibbsSampling< float >;
extern template class GibbsSampling< double >;
}
#include <agrum/BN/inference/GibbsSampling_tpl.h>
......
......@@ -26,23 +26,23 @@
#include <agrum/BN/inference/GibbsSampling.h>
#define GIBBS_SAMPLING_DEFAULT_EPSILON 1e-4 * std::log(2)
#define GIBBS_SAMPLING_DEFAULT_MIN_EPSILON_RATE 1e-6 * std::log(2)
#define GIBBS_SAMPLING_DEFAULT_BURNIN 1000
#define GIBBS_SAMPLING_DEFAULT_EPSILON std::exp(-1.6)
#define GIBBS_SAMPLING_DEFAULT_MIN_EPSILON_RATE std::exp(-5)
#define GIBBS_SAMPLING_DEFAULT_BURNIN 300
#define GIBBS_SAMPLING_POURCENT_DRAWN_SAMPLE 50 // percent drawn
#define GIBBS_SAMPLING_DRAWN_AT_RANDOM false
#define GIBBS_SAMPLING_DRAWN_AT_RANDOM true
namespace gum {
/// default constructor
template < typename GUM_SCALAR >
GibbsSampling< GUM_SCALAR >::GibbsSampling(const IBayesNet< GUM_SCALAR >* BN)
: SamplingInference< GUM_SCALAR >(BN)
GibbsSampling< GUM_SCALAR >::GibbsSampling(const IBayesNet< GUM_SCALAR >* bn)
: SamplingInference< GUM_SCALAR >(bn)
, GibbsOperator< GUM_SCALAR >(
*BN,
*bn,
&this->hardEvidence(),
1 + (BN->size() * GIBBS_SAMPLING_POURCENT_DRAWN_SAMPLE / 100),
1 + (bn->size() * GIBBS_SAMPLING_POURCENT_DRAWN_SAMPLE / 100),
GIBBS_SAMPLING_DRAWN_AT_RANDOM) {
GUM_CONSTRUCTOR(GibbsSampling);
......
......@@ -52,7 +52,7 @@ namespace gum {
/**
* Default constructor
*/
MonteCarloSampling(const IBayesNet<GUM_SCALAR> *BN);
MonteCarloSampling(const IBayesNet<GUM_SCALAR> *bn);
/**
* Destructor
......
......@@ -34,10 +34,8 @@ namespace gum {
/// Default constructor
template < typename GUM_SCALAR >
MonteCarloSampling< GUM_SCALAR >::MonteCarloSampling(
const IBayesNet< GUM_SCALAR >* BN)
: SamplingInference< GUM_SCALAR >(BN) {
this->setBurnIn(0);
const IBayesNet< GUM_SCALAR >* bn)
: SamplingInference< GUM_SCALAR >(bn) {
GUM_CONSTRUCTOR(MonteCarloSampling);
}
......
......@@ -28,11 +28,11 @@
#ifndef GUM_HYBRID_INFERENCE_H
#define GUM_HYBRID_INFERENCE_H
#include <agrum/BN/inference/tools/marginalTargetedInference.h>
#include <agrum/BN/inference/tools/approximateInference.h>
#include <agrum/BN/inference/GibbsSampling.h>
#include <agrum/BN/inference/MonteCarloSampling.h>
#include <agrum/BN/inference/importanceSampling.h>
#include <agrum/BN/inference/tools/approximateInference.h>
#include <agrum/BN/inference/tools/marginalTargetedInference.h>
#include <agrum/BN/inference/weightedSampling.h>
namespace gum {
......@@ -93,6 +93,18 @@ namespace gum {
extern template class HybridApproxInference< float, GibbsSampling >;
extern template class HybridApproxInference< double, GibbsSampling >;
template < typename GUM_SCALAR >
using HybridMonteCarloSampling =
HybridApproxInference< GUM_SCALAR, MonteCarloSampling >;
template < typename GUM_SCALAR >
using HybridWeightedSampling =
HybridApproxInference< GUM_SCALAR, WeightedSampling >;
template < typename GUM_SCALAR >
using HybridImportanceSampling =
HybridApproxInference< GUM_SCALAR, ImportanceSampling >;
template < typename GUM_SCALAR >
using HybridGibbsSampling = HybridApproxInference< GUM_SCALAR, GibbsSampling >;
}
#include <agrum/BN/inference/hybridApproxInference_tpl.h>
......
......@@ -52,7 +52,7 @@ namespace gum {
/**
* Default constructor
*/
ImportanceSampling( const IBayesNet<GUM_SCALAR>* BN );
ImportanceSampling( const IBayesNet<GUM_SCALAR>* bn );
/**
......
......@@ -33,10 +33,9 @@ namespace gum {
/// default constructor
template < typename GUM_SCALAR >
ImportanceSampling< GUM_SCALAR >::ImportanceSampling(
const IBayesNet< GUM_SCALAR >* BN)
: SamplingInference< GUM_SCALAR >(BN) {
const IBayesNet< GUM_SCALAR >* bn)
: SamplingInference< GUM_SCALAR >(bn) {
this->setBurnIn(0);
GUM_CONSTRUCTOR(ImportanceSampling);
}
......
......@@ -53,7 +53,6 @@ namespace gum {
this->setMaxIter( LBP_DEFAULT_MAXITER );
this->setVerbosity( LBP_DEFAULT_VERBOSITY );
this->setPeriodSize( LBP_DEFAULT_PERIOD_SIZE );
this->setBurnIn(0); //no burn in for LBP
__init_messages();
}
......
......@@ -33,19 +33,6 @@
namespace gum {
/**
* @class ApproximateInference approximateInference.h
* <agrum/BN/inference/approximateInference.h>
* @brief A generic class for making approximate inference in bayesian networks
* adapted
*
* @ingroup bn_approximation
*
* The goal of this class is to define the general scheme used by all
* approximate inference algorithms, which are implemented as derived classes of ApproximateInference.
* This class inherits from MarginalTargetedInference for the handling of
* marginal targets and from ApproximationScheme.
*/
template < typename GUM_SCALAR >
class ApproximateInference : public MarginalTargetedInference< GUM_SCALAR >,
public ApproximationScheme {
......
......@@ -112,7 +112,7 @@ namespace gum {
GUM_SCALAR _wtotal;
/// number of generated samples
int _ntotal;
Size _ntotal;
/// bayesian network on which approximation is done
const IBayesNet<GUM_SCALAR>* _bn;
......
......@@ -32,7 +32,7 @@ namespace gum {
GUM_CONSTRUCTOR(Estimator);
_wtotal = (GUM_SCALAR)0.;
_ntotal = 0;
_ntotal = (Size)0;
_bn = nullptr;
}
......@@ -111,6 +111,8 @@ namespace gum {
_estimator.insert(lbp->BN().variable(node).name(), v);
}
}
_ntotal = (Size)virtualLBPSize;
_wtotal = virtualLBPSize;
}
/*update the Estimator given an instantiation I with weight bias w*/
......@@ -119,7 +121,7 @@ namespace gum {
void Estimator< GUM_SCALAR >::update(Instantiation I, GUM_SCALAR w) {