[aGrUM] adding new sampling inference framework + MonteCarlo, Gibbs, Weighted,...

[aGrUM] adding new sampling inference framework + MonteCarlo, Gibbs, Weighted, Importance sampling + a new hybrid approx inference (i.e. LBP+sampling)
parent cdac1331
#include <agrum/BN/algorithms/divergence/gibbsKL2.h>
#include <cmath>
// Explicit template instantiations: compile GibbsKL2 once for the two
// floating-point precisions shipped by the library (they match the
// 'extern template' declarations at the end of gibbsKL2.h).
template class gum::GibbsKL2<float>;
template class gum::GibbsKL2<double>;
/***************************************************************************
* Copyright (C) 2005 by Christophe GONZALES and Pierre-Henri WUILLEMIN *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief algorithm for approximate computation of the KL divergence between
* BNs using Gibbs sampling
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
*
*/
#ifndef GUM_GIBBS_KL2_H
#define GUM_GIBBS_KL2_H
#include <agrum/BN/algorithms/divergence/KL.h>
#include <agrum/BN/inference/tools/gibbsOperator.h>
#include <agrum/core/approximations/approximationScheme.h>
#include <agrum/core/signal/signaler.h>
namespace gum {
/**
* GibbsKL2 computes the KL divergence between 2 BNs using an approximation
* pattern: Gibbs sampling.
*
* KL.process() computes KL(P||Q) using klPQ() and KL(Q||P) using klQP(). The
*computations are made once. The second is for free :)
* GibbsKL allows as well to compute in the same time the Hellinger distance
*(\f$
*\sqrt{\sum_i (\sqrt{p_i}-\sqrt{q_i})^2}\f$) (Kokolakis and Nanopoulos, 2001)
* and Bhattacharya distance (Kailath, T., 1967)
*
* It may happen that P*ln(P/Q) is not computable (Q=0 and P!=0). In such a
*case, KL
*keeps working but trace this error (errorPQ() and errorQP()). In those cases,
*Hellinger distance approximation is under-evaluated.
*
* @warning : convergence and stop criteria are designed w.r.t the main
*computation
*: KL(P||Q). The 3 others have no guarantee.
*
* snippets :
* @code
* gum::KL base_kl(net1,net2);
* if (base_kl.difficulty()!=KL::HEAVY) {
* gum::BruteForceKL kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* } else {
* gum::GibbsKL2 kl(base_kl);
* std::cout<<"KL net1||net2 :"<<kl.klPQ()<<std::endl;
* }
* @endcode
*/
template <typename GUM_SCALAR>
class GibbsKL2 : public KL<GUM_SCALAR>,
public ApproximationScheme,
public GibbsOperator<GUM_SCALAR> {
public:
/* no default constructor: the two networks must always be provided */
/** constructor: must be given the 2 BNs to compare
* @param P the first Bayesian network (the Gibbs chain samples from P)
* @param Q the second Bayesian network
* @throw gum::OperationNotAllowed if the 2 BNs have not the same domainSize
* or
* compatible node sets.
*/
GibbsKL2( const IBayesNet<GUM_SCALAR>& P, const IBayesNet<GUM_SCALAR>& Q );
/** copy constructor: builds a Gibbs-based estimator from any existing KL
* object (see the snippet in the class documentation above)
*/
GibbsKL2( const KL<GUM_SCALAR>& kl );
/** destructor */
~GibbsKL2();
protected:
/// estimates KL(P||Q), KL(Q||P), Hellinger and Bhattacharya distances by
/// Gibbs sampling, filling the accumulators inherited from KL
/// (implementation in gibbsKL2_tpl.h)
void _computeKL( void );
// protected state inherited from KL<GUM_SCALAR>:
using KL<GUM_SCALAR>::_p;             // first network (sampling distribution)
using KL<GUM_SCALAR>::_q;             // second network
using KL<GUM_SCALAR>::_hellinger;     // Hellinger distance accumulator
using KL<GUM_SCALAR>::_bhattacharya;  // Bhattacharya distance accumulator
using KL<GUM_SCALAR>::_klPQ;          // KL(P||Q) accumulator
using KL<GUM_SCALAR>::_klQP;          // KL(Q||P) accumulator
using KL<GUM_SCALAR>::_errorPQ;       // #samples where Q=0 while P!=0
using KL<GUM_SCALAR>::_errorQP;       // #samples where P=0 while Q!=0
};
extern template class GibbsKL2<float>;
extern template class GibbsKL2<double>;
} // namespace gum
#include <agrum/BN/algorithms/divergence/gibbsKL2_tpl.h>
#endif
/***************************************************************************
* Copyright (C) 2005 by Pierre-Henri WUILLEMIN et Christophe GONZALES *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief KL divergence between BNs -- implementation using Gibbs sampling
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
*/
#include <agrum/BN/IBayesNet.h>
#include <agrum/BN/algorithms/divergence/gibbsKL2.h>
#include <agrum/BN/inference/tools/gibbsOperator.h>
#include <agrum/core/approximations/approximationScheme.h>
#include <agrum/core/hashTable.h>
#include <cmath>
#define GIBBSKL_DEFAULT_MAXITER 10000000
#define GIBBSKL_DEFAULT_EPSILON 1e-10
#define GIBBSKL_DEFAULT_MIN_EPSILON_RATE 1e-10
#define GIBBSKL_DEFAULT_PERIOD_SIZE 200
#define GIBBSKL_DEFAULT_VERBOSITY false
#define GIBBSKL_DEFAULT_BURNIN 2000
#define GIBBSKL_DEFAULT_TIMEOUT 6000
namespace gum {
/// constructor from the two networks to compare; the Gibbs operator is
/// initialized on P, i.e. samples are drawn from P
template <typename GUM_SCALAR>
GibbsKL2<GUM_SCALAR>::GibbsKL2( const IBayesNet<GUM_SCALAR>& P,
const IBayesNet<GUM_SCALAR>& Q )
: KL<GUM_SCALAR>( P, Q )
, ApproximationScheme()
, GibbsOperator<GUM_SCALAR>( P ) {
GUM_CONSTRUCTOR( GibbsKL2 );
// install the default stopping criteria (see the GIBBSKL_DEFAULT_* macros)
setEpsilon( GIBBSKL_DEFAULT_EPSILON );
setMinEpsilonRate( GIBBSKL_DEFAULT_MIN_EPSILON_RATE );
setMaxIter( GIBBSKL_DEFAULT_MAXITER );
setVerbosity( GIBBSKL_DEFAULT_VERBOSITY );
setBurnIn( GIBBSKL_DEFAULT_BURNIN );
setPeriodSize( GIBBSKL_DEFAULT_PERIOD_SIZE );
setMaxTime( GIBBSKL_DEFAULT_TIMEOUT );
}
/// "copy" constructor: reuses the two networks held by an existing KL
/// object; the Gibbs operator is initialized on kl.p() (samples come from P)
template <typename GUM_SCALAR>
GibbsKL2<GUM_SCALAR>::GibbsKL2( const KL<GUM_SCALAR>& kl )
: KL<GUM_SCALAR>( kl )
, ApproximationScheme()
, GibbsOperator<GUM_SCALAR>( kl.p() ) {
GUM_CONSTRUCTOR( GibbsKL2 );
// same default stopping criteria as the main constructor
setEpsilon( GIBBSKL_DEFAULT_EPSILON );
setMinEpsilonRate( GIBBSKL_DEFAULT_MIN_EPSILON_RATE );
setMaxIter( GIBBSKL_DEFAULT_MAXITER );
setVerbosity( GIBBSKL_DEFAULT_VERBOSITY );
setBurnIn( GIBBSKL_DEFAULT_BURNIN );
setPeriodSize( GIBBSKL_DEFAULT_PERIOD_SIZE );
setMaxTime( GIBBSKL_DEFAULT_TIMEOUT );
}
/// destructor (no resources owned beyond what the bases manage)
template <typename GUM_SCALAR>
GibbsKL2<GUM_SCALAR>::~GibbsKL2() {
GUM_DESTRUCTOR( GibbsKL2 );
}
// Estimates KL(P||Q), KL(Q||P), the Hellinger and the Bhattacharya
// distances in a single Gibbs-sampling run over P.
//
// Importance-sampling identities used (samples I are drawn from P):
//   KL(P||Q)     = E_P[ log(P/Q) ]       -> accumulate log2(pq/pp), negate at the end
//   KL(Q||P)     = E_P[ (Q/P) log(Q/P) ] -> accumulate -delta * ratio
//   Hellinger    = sqrt( E_P[ (sqrt(P)-sqrt(Q))^2 / P ] )
//   Bhattacharya = -log( E_P[ sqrt(Q/P) ] )
//
// When pq==0 while pp!=0 (or conversely) the log is undefined: the sample
// is skipped for that term and _errorPQ / _errorQP is incremented instead
// (Hellinger is then under-evaluated, as documented in the header).
template <typename GUM_SCALAR>
void GibbsKL2<GUM_SCALAR>::_computeKL() {
gum::Instantiation Iq;
_q.completeInstantiation( Iq );
// initial particle, drawn from _p (the sampling distribution)
gum::Instantiation I = this->_monteCarloSample( this->_p );
initApproximationScheme();
// map between particle() variables and _q variables (using name of vars)
HashTable<const DiscreteVariable*, const DiscreteVariable*> map;
for ( Idx ite = 0; ite < I.nbrDim(); ++ite ) {
map.insert( &I.variable( ite ),
&_q.variableFromName( I.variable( ite ).name() ) );
}
float w = 1.;  // weight required by drawNextInstance's interface
// BURN IN: let the chain forget its arbitrary starting point
for ( Idx i = 0; i < burnIn(); i++ )
I = this->drawNextInstance( &w, I, this->_p );
// SAMPLING
_klPQ = _klQP = _hellinger = (GUM_SCALAR)0.0;
_errorPQ = _errorQP = 0;
GUM_SCALAR delta, ratio, error;
delta = ratio = error = (GUM_SCALAR)-1;
GUM_SCALAR oldPQ = 0.0;
GUM_SCALAR pp, pq;
do {
// the min-epsilon-rate criterion is only meaningful on iterations where
// _klPQ actually received a contribution: disable it until proven so
this->disableMinEpsilonRate();
I = this->drawNextInstance( &w, I, this->_p );
updateApproximationScheme();
Iq.setValsFrom( map, I );  // copy the sample into _q's instantiation
pp = _p.jointProbability( I );
pq = _q.jointProbability( Iq );
if ( pp != (GUM_SCALAR)0.0 ) {
// (sqrt(pp)-sqrt(pq))^2 squared explicitly instead of std::pow(.,2)
const GUM_SCALAR sqdiff = std::sqrt( pp ) - std::sqrt( pq );
_hellinger += ( sqdiff * sqdiff ) / pp;
if ( pq != (GUM_SCALAR)0.0 ) {
_bhattacharya += std::sqrt( pq / pp );  // = std::sqrt(pp*pq)/pp
this->enableMinEpsilonRate();  // a contribution was made this iteration
ratio = pq / pp;
delta = (GUM_SCALAR)std::log2( ratio );
_klPQ += delta;
} else {
_errorPQ++;  // P!=0, Q=0: log undefined for KL(P||Q)
}
}
if ( pq != (GUM_SCALAR)0.0 ) {
if ( pp != (GUM_SCALAR)0.0 ) {
// delta and ratio were necessarily recomputed just above, since
// both pp and pq are non-zero on this iteration
_klQP += ( GUM_SCALAR )( -delta * ratio );
} else {
_errorQP++;  // Q!=0, P=0: log undefined for KL(Q||P)
}
}
if ( this->isEnabledMinEpsilonRate() ) {
// delta is reused as a temporary: current running mean of _klPQ
delta = _klPQ / nbrIterations();
error = (GUM_SCALAR)std::abs( delta - oldPQ );
oldPQ = delta;
}
} while ( continueApproximationScheme( error ) );
// turn the accumulated sums into Monte Carlo means / final estimates.
_klPQ = -_klPQ / ( nbrIterations() );
_klQP = -_klQP / ( nbrIterations() );
_hellinger = std::sqrt( _hellinger / nbrIterations() );
// BUGFIX: the Bhattacharya coefficient is the MEAN of sqrt(pq/pp), so the
// sum must be normalized by nbrIterations() before taking -log (otherwise
// the result is off by log(nbrIterations()), consistent with the other
// three estimates above which all divide by nbrIterations()).
_bhattacharya = -std::log( _bhattacharya / nbrIterations() );
}
}
......@@ -25,7 +25,7 @@
#ifndef GUM_GIBBS_INFERENCE_H
#define GUM_GIBBS_INFERENCE_H
#include <agrum/BN/inference/marginalTargetedInference.h>
#include <agrum/BN/inference/tools/marginalTargetedInference.h>
#include <agrum/BN/samplers/GibbsSampler.h>
#include <agrum/core/approximations/approximationScheme.h>
......
......@@ -35,7 +35,7 @@
#define GIBBS_DEFAULT_BURNIN 3000
// to ease parsing for IDE
#include <agrum/BN/inference/BayesNetInference.h>
#include <agrum/BN/inference/tools/BayesNetInference.h>
#include <agrum/BN/inference/GibbsInference.h>
#include <agrum/BN/samplers/GibbsSampler.h>
......
#include <agrum/BN/inference/GibbsSampling.h>
// Explicit template instantiations for the two supported precisions
// (matching the 'extern template' declarations in GibbsSampling.h).
template class gum::GibbsApproxInference<float>;
template class gum::GibbsApproxInference<double>;
/***************************************************************************
* Copyright (C) 2005 by Pierre-Henri WUILLEMIN et Christophe GONZALES *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief This file contains Gibbs sampling class definition.
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
*/
#ifndef GUM_GIBBS_INFERENCE_2_H
#define GUM_GIBBS_INFERENCE_2_H
#include <agrum/BN/inference/tools/approximateInference.h>
#include <agrum/BN/inference/tools/gibbsOperator.h>
namespace gum {
/**
* @class GibbsApproxInference gibbsApproxInference.h
*<agrum/BN/inference/gibbsApproxInference.h>
* @brief class for making Gibbs sampling inference in bayesian networks.
* @ingroup bn_approximation
*
* This class overrides pure function declared in the inherited class
*ApproximateInference.
* It defines the way Gibbs sampling draws a sample. It also inherits
*GibbsOperator
* which contains Gibbs sampling methods.
*
*/
template <typename GUM_SCALAR>
class GibbsApproxInference : public ApproximateInference<GUM_SCALAR>,
public GibbsOperator<GUM_SCALAR> {
public:
/**
* Default constructor
* @param BN the Bayesian network on which inference is performed
*/
GibbsApproxInference( const IBayesNet<GUM_SCALAR>* BN );
/**
* Destructor
*/
virtual ~GibbsApproxInference();
protected:
/// draws a defined number of samples without updating the estimators
/// @return the last sample drawn, seed of the subsequent sampling phase
virtual Instantiation _burnIn();
/// draws a sample given previous one according to Gibbs sampling
/**
* @param w the weight of sample being generated
* @param prev the previous sample generated
* @param bn the bayesian network containing the evidence
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Uses the Gibbs sampling method to generate a new sample given the previous
* one: one node x is chosen and resampled given the instantiation of all
* other nodes, i.e. from P( x | instantiation_markovblanket(x) ).
* The actual work is implemented in the inherited class GibbsOperator; this
* function only makes the call to it.
*/
virtual Instantiation
_draw( float* w,
Instantiation prev = NULL,
const IBayesNet<GUM_SCALAR>& bn = BayesNet<GUM_SCALAR>(),
const NodeSet& hardEvNodes = NodeSet(),
const NodeProperty<Idx>& hardEv = NodeProperty<Idx>() );
/// draws a Monte Carlo sample
/**
* @param bn the reference bayesian network
*
* This Monte Carlo sample generates a good starting point for Gibbs
* sampling, because it returns a sample consistent with the evidence.
* It differs from the version implemented in the inherited class
* ApproximateInference in that it also initializes attributes needed for
* Gibbs sampling.
*/
virtual Instantiation _monteCarloSample( const IBayesNet<GUM_SCALAR>& bn );
/// fired when Bayesian network is contextualized
/**
* @param bn the contextualized BayesNetFragment
* @param targets inference target variables
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Registers the target variables and the hard evidence on this engine.
*/
virtual void _onContextualize( BayesNetFragment<GUM_SCALAR>* bn,
const NodeSet& targets,
const NodeSet& hardEvNodes,
const NodeProperty<Idx>& hardEv );
/// fired when evidence is added on node id (hard or soft)
virtual void _onEvidenceAdded( const NodeId id, bool isHardEvidence );
/// fired when evidence is removed from node id
virtual void _onEvidenceErased( const NodeId id, bool isHardEvidence );
/// fired when all evidence is erased at once
virtual void _onAllEvidenceErased( bool contains_hard_evidence );
/// fired when the evidence on node id changes (e.g. soft <-> hard)
virtual void _onEvidenceChanged( const NodeId id, bool hasChangedSoftHard );
};
extern template class GibbsApproxInference<float>;
extern template class GibbsApproxInference<double>;
}
#include <agrum/BN/inference/GibbsSampling_tpl.h>
#endif
/***************************************************************************
* Copyright (C) 2005 by Christophe GONZALES et Pierre-Henri WUILLEMIN *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief Implementation of Gibbs Sampling for inference in Bayesian Networks.
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
*/
#include <agrum/BN/inference/GibbsSampling.h>
namespace gum {
/// default constructor: hands the network to the ApproximateInference base
/// and to GibbsOperator, which holds the sampling machinery
template <typename GUM_SCALAR>
GibbsApproxInference<GUM_SCALAR>::GibbsApproxInference(const IBayesNet<GUM_SCALAR>* BN)
: ApproximateInference<GUM_SCALAR>(BN), GibbsOperator<GUM_SCALAR>(*BN) {
GUM_CONSTRUCTOR(GibbsApproxInference);
}
/// destructor (no resources owned beyond what the bases manage)
template <typename GUM_SCALAR>
GibbsApproxInference<GUM_SCALAR>::~GibbsApproxInference() {
GUM_DESTRUCTOR(GibbsApproxInference);
}
/// draws the initial, evidence-consistent sample by forwarding to the
/// GibbsOperator version (which, per the header doc, also initializes the
/// attributes needed for Gibbs sampling)
template <typename GUM_SCALAR>
Instantiation GibbsApproxInference<GUM_SCALAR>::_monteCarloSample(const IBayesNet<GUM_SCALAR>& bn){
return GibbsOperator<GUM_SCALAR>::_monteCarloSample(bn);
}
/// Burn-in phase: draws this->burnIn() samples without updating any
/// estimator, so the Markov chain can forget its starting point.
/// The initial Monte Carlo sample counts as the first burn-in sample,
/// hence the loop starting at 1.
/// @return the last instantiation drawn (seed of the sampling phase)
template <typename GUM_SCALAR>
Instantiation GibbsApproxInference<GUM_SCALAR>::_burnIn(){
float w = 1.;  // required by _draw's interface; unused by Gibbs burn-in
gum::Instantiation Ip = _monteCarloSample(this->BN());
// NOTE: the former explicit burnIn()==0 early return was dead code --
// with an unsigned Size, "i = 1; i < 0" never iterates, so the loop
// alone already returns the Monte Carlo sample in that case.
for (Size i = 1; i < this->burnIn(); i++)
Ip = this->_draw(&w, Ip);
return Ip;
}
/// draws next sample for gibbs sampling
/// @param w pointer to the sample's weight, passed through to drawNextInstance
/// @param prev the previous sample, used as the chain's current state
/// @note bn, hardEvNodes and hardEv belong to the generic _draw interface
/// but are ignored here: sampling always uses this->BN()
template <typename GUM_SCALAR>
Instantiation GibbsApproxInference<GUM_SCALAR>::_draw(float* w, Instantiation prev, const IBayesNet<GUM_SCALAR>& bn, const NodeSet& hardEvNodes, const NodeProperty<Idx>& hardEv){
return this->drawNextInstance(w, prev, this->BN());
}
/* Initializes node properties after the BN has been contextualized, so the
 * subsequent computation is lighter: every inference target and every hard
 * evidence (node, value) pair is registered on this engine. */
template <typename GUM_SCALAR>
void GibbsApproxInference<GUM_SCALAR>::_onContextualize(
    BayesNetFragment<GUM_SCALAR>* bn,
    const NodeSet&                targets,
    const NodeSet&                hardEvNodes,
    const NodeProperty<Idx>&      hardEv) {
  // register every inference target
  for (const auto node : targets) {
    this->addTarget(node);
  }
  // register every hard evidence with its observed value
  for (const auto node : hardEvNodes) {
    this->addEvidence(node, hardEv[node]);
  }
}
/// fired when evidence is added to the engine
/// @param id the node receiving evidence
/// @param isHardEvidence true if the evidence fixes the node to a value
template <typename GUM_SCALAR>
INLINE void GibbsApproxInference<GUM_SCALAR>::_onEvidenceAdded( const NodeId id,
bool isHardEvidence ) {
if ( isHardEvidence )
// hard evidence: register the (node, value) pair held in hardEvidence()
this->addHardEvidence( id, this->hardEvidence()[id] );
else {
// soft evidence: register the potential stored in evidence()
this->addSoftEvidence( *( this->evidence()[id] ) );
}
}
/// fired when evidence is removed from node id
/// @note only hard evidence triggers an explicit erase here; soft evidence
/// apparently needs no action on this side -- TODO confirm against
/// GibbsOperator's bookkeeping
template <typename GUM_SCALAR>
INLINE void GibbsApproxInference<GUM_SCALAR>::_onEvidenceErased( const NodeId id,
bool isHardEvidence ) {
if ( isHardEvidence )
this->eraseHardEvidence( id );
}
/// fired when all evidence is erased at once: delegates to the operator's
/// global reset (the contains_hard_evidence flag is not needed for that)
template <typename GUM_SCALAR>
INLINE void GibbsApproxInference<GUM_SCALAR>::_onAllEvidenceErased( bool contains_hard_evidence ){
this->eraseAllEvidenceOperator();
}
/// fired when the evidence on node id changes nature (soft <-> hard)
template <typename GUM_SCALAR>
INLINE void
GibbsApproxInference<GUM_SCALAR>::_onEvidenceChanged( const NodeId id,
bool hasChangedSoftHard ) {
if ( this->hardEvidence().exists( id ) ) {
// the evidence became hard: drop the old soft evidence and register
// the new hard (node, value) pair
this->eraseSoftEvidence( id );
this->addHardEvidence( id, this->hardEvidence()[id] );
} else {
// the evidence became soft: drop the old hard evidence and register
// the new soft potential
this->eraseHardEvidence( id );
this->addSoftEvidence( *( this->evidence()[id] ) );
}
}
}
#include <agrum/BN/inference/MonteCarloSampling.h>
// Explicit template instantiations for the two supported precisions.
template class gum::MonteCarloApproxInference<float>;
template class gum::MonteCarloApproxInference<double>;
/***************************************************************************
* Copyright (C) 2005 by Pierre-Henri WUILLEMIN et Christophe GONZALES *
* {prenom.nom}_at_lip6.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
* @file
* @brief This file contains Monte Carlo sampling class definition.
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
*/
#ifndef GUM_MONTE_CARLO_INFERENCE_H
#define GUM_MONTE_CARLO_INFERENCE_H
#include <agrum/BN/inference/tools/approximateInference.h>
namespace gum {
/**
* @class MonteCarloInference monteCarloInference.h
*<agrum/BN/inference/monteCarloInference.h>
* @brief class for making Monte Carlo sampling inference in bayesian networks.
* @ingroup bn_approximation
*
* This class overrides pure function declared in the inherited class ApproximateInference.
* It defines the way Monte Carlo sampling draws a sample.
*
*/
template <typename GUM_SCALAR>
class MonteCarloApproxInference : public ApproximateInference<GUM_SCALAR> {
public:
/**
* Default constructor
*/
MonteCarloApproxInference(const IBayesNet<GUM_SCALAR>* BN);
/**
* Destructor
*/
virtual ~MonteCarloApproxInference();
protected:
/// draws a defined number of samples without updating the estimators
virtual Instantiation _burnIn ();
/// draws a sample according to classic Monte Carlo sampling
/**
* @param w the weight of sample being generated
* @param prev the previous sample generated
* @param bn the bayesian network containing the evidence
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Generates a new sample using forward sampling, rejecting
* samples not consistent with evidence
*
*/
virtual Instantiation _draw (float* w , Instantiation prev = NULL, const IBayesNet<GUM_SCALAR>& bn = BayesNet<GUM_SCALAR>(), const NodeSet& hardEvNodes = NodeSet(), const NodeProperty<Idx>& hardEv = NodeProperty<Idx>());
///fired when Bayesian network is contextualized
/**
* @param bn the contextualized BayesNetFragment
* @param targets inference target variables