[aGrUM] refactor of some parts in sampling inference classes

parent 60cffed6
@@ -10,6 +10,7 @@ ColumnLimit: 83
MaxEmptyLinesToKeep: 2
IndentWidth: 2
ContinuationIndentWidth: 2
UseTab: Never
AccessModifierOffset: 0
@@ -20,13 +21,14 @@ SpacesBeforeTrailingComments: 2
Cpp11BracedListStyle: true
BreakBeforeBraces: Attach
SpacesInParentheses: true
SpacesInParentheses: false
SpaceInEmptyParentheses: false
AlwaysBreakTemplateDeclarations: true
BreakBeforeBinaryOperators: NonAssignment
BreakConstructorInitializersBeforeComma: true
BreakBeforeBinaryOperators: false
SpacesInAngles: true
#clang-format-4.0
#BreakStringLiterals: true
......
@@ -45,7 +45,7 @@ namespace gum {
template <typename GUM_SCALAR>
GibbsKL<GUM_SCALAR>::GibbsKL( const IBayesNet<GUM_SCALAR>& P,
const IBayesNet<GUM_SCALAR>& Q )
const IBayesNet<GUM_SCALAR>& Q )
: KL<GUM_SCALAR>( P, Q )
, ApproximationScheme()
, GibbsOperator<GUM_SCALAR>( P ) {
@@ -64,7 +64,8 @@ namespace gum {
GibbsKL<GUM_SCALAR>::GibbsKL( const KL<GUM_SCALAR>& kl )
: KL<GUM_SCALAR>( kl )
, ApproximationScheme()
, GibbsOperator<GUM_SCALAR>( kl.p() ) {
// Gibbs operator with 10% of the nodes changed at random between samples
, GibbsOperator<GUM_SCALAR>( kl.p(), 1 + ( kl.p().size() / 10 ), true ) {
GUM_CONSTRUCTOR( GibbsKL );
setEpsilon( GIBBSKL_DEFAULT_EPSILON );
@@ -88,7 +89,7 @@ namespace gum {
_q.completeInstantiation( Iq );
gum::Instantiation I = this->_monteCarloSample( this->_p ); // p or q ?
gum::Instantiation I = this->monteCarloSample();
initApproximationScheme();
// map between particle() variables and _q variables (using name of vars)
@@ -99,11 +100,9 @@ namespace gum {
&_q.variableFromName( I.variable( ite ).name() ) );
}
float w = 1.;
// BURN IN
for ( Idx i = 0; i < burnIn(); i++ )
I = this->drawNextInstance( &w, I, this->_p );
I = this->nextSample( I );
// SAMPLING
_klPQ = _klQP = _hellinger = (GUM_SCALAR)0.0;
@@ -117,7 +116,7 @@ namespace gum {
do {
this->disableMinEpsilonRate();
I = this->drawNextInstance( &w, I, this->_p );
I = this->nextSample( I );
updateApproximationScheme();
//_p.synchroInstantiations( Ip,I);
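Note: the sampling loop above accumulates KL and Hellinger terms over Gibbs samples drawn from _p. A standalone sketch of the same estimation idea (not aGrUM code; plain categorical distributions stand in for the two Bayesian networks):

// Sketch: estimating KL(P||Q) and the squared Hellinger distance from
// samples drawn from P, as the loop above does with Gibbs samples.
#include <cmath>
#include <iostream>
#include <random>
#include <vector>

int main() {
  std::vector<double> P = {0.5, 0.3, 0.2};
  std::vector<double> Q = {0.4, 0.4, 0.2};

  std::mt19937                    gen(42);
  std::discrete_distribution<int> sampleP(P.begin(), P.end());

  const int nSamples = 100000;
  double    klPQ = 0.0, hell = 0.0;
  for (int i = 0; i < nSamples; ++i) {
    int x = sampleP(gen);                     // x ~ P
    klPQ += std::log(P[x] / Q[x]);            // E_P[log(P/Q)] term
    double r = std::sqrt(Q[x] / P[x]) - 1.0;  // (sqrt(Q/P) - 1)^2 term
    hell += r * r;
  }
  std::cout << "KL(P||Q)    ~ " << klPQ / nSamples << "\n";
  std::cout << "Hellinger^2 ~ " << hell / (2.0 * nSamples) << "\n";
}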
......
@@ -50,7 +50,7 @@ namespace gum {
template <typename GUM_SCALAR>
class GibbsSampling : public ApproximateInference<GUM_SCALAR>,
public GibbsOperator<GUM_SCALAR> {
public GibbsOperator<GUM_SCALAR> {
public:
/**
@@ -76,20 +76,12 @@ namespace gum {
* @param hardEv hard evidences values
*
* Uses the Gibbs sampling method to generate a new sample given the previous
* one.
* The method is implemented in the inherited class GibbsOperator. This function
* only makes the
* call to it.
*It consists of choosing one node x to sample, given the instantiation of all other
* nodes.
*It requires computing of P( x \given instantiation_markovblanket(x)).
* one. The method is implemented in the inherited class GibbsOperator; this
* function only forwards the call to it. It consists of choosing one node x to
* sample, given the instantiation of all the other nodes. It requires computing
* P( x \given instantiation_markovblanket(x) ).
*/
virtual Instantiation
_draw( float* w,
Instantiation prev ,
const IBayesNet<GUM_SCALAR>& bn = BayesNet<GUM_SCALAR>(),
const NodeSet& hardEvNodes = NodeSet(),
const NodeProperty<Idx>& hardEv = NodeProperty<Idx>() );
virtual Instantiation _draw( float* w, Instantiation prev );
/// draws a Monte Carlo sample
/**
@@ -102,22 +94,7 @@ namespace gum {
class ApproximateInference because it also initializes attributes needed for
* Gibbs sampling.
*/
virtual Instantiation _monteCarloSample( const IBayesNet<GUM_SCALAR>& bn );
/// fired when Bayesian network is contextualized
/**
* @param bn the contextualized BayesNetFragment
* @param targets inference target variables
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Adds the evidence and target variables.
*
*/
virtual void _onContextualize( BayesNetFragment<GUM_SCALAR>* bn,
const NodeSet& targets,
const NodeSet& hardEvNodes,
const NodeProperty<Idx>& hardEv );
virtual Instantiation _monteCarloSample();
virtual void _onEvidenceAdded( const NodeId id, bool isHardEvidence );
virtual void _onEvidenceErased( const NodeId id, bool isHardEvidence );
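Note: as a concrete, library-agnostic illustration of the step documented above (resampling one variable conditionally on all the others), here is a two-variable sketch; the joint table and every name in it are made up for the example:

#include <iostream>
#include <random>
#include <vector>

// joint[a][b] = P(A = a, B = b) for two binary variables.
using Joint = std::vector<std::vector<double>>;

// One Gibbs transition: resample variable `which` (0 = A, 1 = B) from its
// conditional given the other variable's current value -- the analogue of
// P( x | markov_blanket(x) ) in a Bayesian network.
void gibbsStep(const Joint& joint, int& a, int& b, int which,
               std::mt19937& gen) {
  double w0, w1;  // unnormalized conditional weights
  if (which == 0) { w0 = joint[0][b]; w1 = joint[1][b]; }
  else            { w0 = joint[a][0]; w1 = joint[a][1]; }
  std::bernoulli_distribution pick1(w1 / (w0 + w1));
  (which == 0 ? a : b) = pick1(gen) ? 1 : 0;
}

int main() {
  Joint        joint = {{0.3, 0.2}, {0.1, 0.4}};
  std::mt19937 gen(7);
  std::uniform_int_distribution<int> pickVar(0, 1);

  int       a = 0, b = 0;
  long      countA1 = 0;
  const int n = 200000;
  for (int i = 0; i < n; ++i) {
    gibbsStep(joint, a, b, pickVar(gen), gen);  // choose one node, resample it
    countA1 += a;
  }
  std::cout << "P(A=1) ~ " << double(countA1) / n << "  (exact 0.5)\n";
}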
......
@@ -31,115 +31,43 @@ namespace gum {
/// default constructor
template <typename GUM_SCALAR>
GibbsSampling<GUM_SCALAR>::GibbsSampling(const IBayesNet<GUM_SCALAR>* BN)
: ApproximateInference<GUM_SCALAR>(BN), GibbsOperator<GUM_SCALAR>(*BN) {
GUM_CONSTRUCTOR(GibbsSampling);
GibbsSampling<GUM_SCALAR>::GibbsSampling( const IBayesNet<GUM_SCALAR>* BN )
: ApproximateInference<GUM_SCALAR>( BN )
, GibbsOperator<GUM_SCALAR>( *BN ) {
GUM_CONSTRUCTOR( GibbsSampling );
}
/// destructor
/// destructor
template <typename GUM_SCALAR>
GibbsSampling<GUM_SCALAR>::~GibbsSampling() {
GUM_DESTRUCTOR(GibbsSampling);
GUM_DESTRUCTOR( GibbsSampling );
}
template <typename GUM_SCALAR>
Instantiation GibbsSampling<GUM_SCALAR>::_monteCarloSample(const IBayesNet<GUM_SCALAR>& bn){
return GibbsOperator<GUM_SCALAR>::_monteCarloSample(bn);
Instantiation GibbsSampling<GUM_SCALAR>::_monteCarloSample() {
return GibbsOperator<GUM_SCALAR>::monteCarloSample( samplingBN() );
}
template <typename GUM_SCALAR>
Instantiation GibbsSampling<GUM_SCALAR>::_burnIn(){
gum::Instantiation Ip;
float w = 1.;
Ip = _monteCarloSample(this->BN());
if (this->burnIn() == 0)
return Ip;
for (Size i = 1; i < this->burnIn(); i++)
Ip = this->_draw(&w, Ip);
return Ip;
}
/// draws next sample for gibbs sampling
template <typename GUM_SCALAR>
Instantiation GibbsSampling<GUM_SCALAR>::_draw(float* w, Instantiation prev, const IBayesNet<GUM_SCALAR>& bn, const NodeSet& hardEvNodes, const NodeProperty<Idx>& hardEv){
return this->drawNextInstance(w, prev, this->BN());
}
/* initializing node properties after contextualizing the BN in order for the computation to be lighter */
template <typename GUM_SCALAR>
void GibbsSampling<GUM_SCALAR>::_onContextualize(BayesNetFragment<GUM_SCALAR>* bn, const NodeSet& targets, const NodeSet& hardEvNodes, const NodeProperty<Idx>& hardEv) {
for (auto targ = targets.begin(); targ != targets.end(); ++targ)
this->addTarget(*targ);
for (auto ev = hardEvNodes.begin(); ev != hardEvNodes.end(); ++ev)
this->addEvidence(*ev, hardEv[*ev]);
}
template <typename GUM_SCALAR>
INLINE void GibbsSampling<GUM_SCALAR>::_onEvidenceAdded( const NodeId id,
bool isHardEvidence ) {
if ( isHardEvidence )
this->addHardEvidence( id, this->hardEvidence()[id] );
else {
this->addSoftEvidence( *( this->evidence()[id] ) );
}
Instantiation GibbsSampling<GUM_SCALAR>::_burnIn() {
}
gum::Instantiation Ip;
if ( this->burnIn() == 0 ) return Ip;
template <typename GUM_SCALAR>
INLINE void GibbsSampling<GUM_SCALAR>::_onEvidenceErased( const NodeId id,
bool isHardEvidence ) {
if ( isHardEvidence )
this->eraseHardEvidence( id );
float w = 1.;
Ip = _monteCarloSample();
for ( Size i = 1; i < this->burnIn(); i++ )
Ip = this->_draw( &w, Ip );
return Ip;
}
/// draws next sample for gibbs sampling
template <typename GUM_SCALAR>
INLINE void GibbsSampling<GUM_SCALAR>::_onAllEvidenceErased( bool contains_hard_evidence ){
this->eraseAllEvidenceOperator();
}
template <typename GUM_SCALAR>
INLINE void
GibbsSampling<GUM_SCALAR>::_onEvidenceChanged( const NodeId id,
bool hasChangedSoftHard ) {
if ( this->hardEvidence().exists( id ) ) {
// soft evidence has been removed
this->eraseSoftEvidence( id );
this->addHardEvidence( id, this->hardEvidence()[id] );
} else {
// hard evidence has been removed
this->eraseHardEvidence( id );
this->addSoftEvidence( *( this->evidence()[id] ) );
}
Instantiation GibbsSampling<GUM_SCALAR>::_draw( float* w, Instantiation prev ) {
return this->drawNextInstance( w, prev, samplingBN() );
}
}
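Note: the _burnIn() above follows a standard idiom — one forward sample to obtain a valid start state, then a fixed number of discarded transitions. A generic version, with Sample and the two callbacks as placeholders:

#include <cstddef>
#include <functional>

// Generic burn-in: create a start state, then apply nBurn - 1 transitions
// whose outputs are discarded before any statistic is updated.
template <typename Sample>
Sample burnIn(std::size_t nBurn,
              const std::function<Sample()>&              initialSample,
              const std::function<Sample(const Sample&)>& nextSample) {
  Sample s = initialSample();  // e.g. a forward (Monte Carlo) sample
  for (std::size_t i = 1; i < nBurn; ++i)
    s = nextSample(s);         // transient samples are thrown away
  return s;                    // first state handed to the estimator
}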
@@ -80,20 +80,6 @@ namespace gum {
_draw(float *w, Instantiation prev, const IBayesNet<GUM_SCALAR> &bn = BayesNet<GUM_SCALAR>(),
const NodeSet &hardEvNodes = NodeSet(), const NodeProperty<Idx> &hardEv = NodeProperty<Idx>());
///fired when Bayesian network is contextualized
/**
* @param bn the contextualized BayesNetFragment
* @param targets inference target variables
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Adds the target and evidence variables.
*
*/
virtual void
_onContextualize(BayesNetFragment<GUM_SCALAR> *bn, const NodeSet &targets, const NodeSet &hardEvNodes,
const NodeProperty<Idx> &hardEv);
};
extern template
......
@@ -85,16 +85,5 @@ namespace gum {
}
template <typename GUM_SCALAR>
void MonteCarloSampling<GUM_SCALAR>::_onContextualize(BayesNetFragment<GUM_SCALAR>* bn, const NodeSet& targets, const NodeSet& hardEvNodes, const NodeProperty<Idx>& hardEv) {
for (auto targ = targets.begin(); targ != targets.end(); ++targ)
this->addTarget(*targ);
for (auto ev = hardEvNodes.begin(); ev != hardEvNodes.end(); ++ev)
this->addEvidence(*ev, hardEv[*ev]);
}
}
@@ -19,7 +19,8 @@
***************************************************************************/
/**
* @file
* @brief Implements approximate inference algorithms from Loopy Belief Propagation.
* @brief Implements approximate inference algorithms based on Loopy Belief
* Propagation.
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
*
@@ -33,57 +34,53 @@ namespace gum {
template <typename GUM_SCALAR, template <typename> class APPROX>
HybridApproxInference<GUM_SCALAR, APPROX>::HybridApproxInference(const IBayesNet<GUM_SCALAR>* BN)
: APPROX<GUM_SCALAR>(BN){
HybridApproxInference<GUM_SCALAR, APPROX>::HybridApproxInference(
const IBayesNet<GUM_SCALAR>* BN )
: APPROX<GUM_SCALAR>( BN ) {
GUM_CONSTRUCTOR(HybridApproxInference);
}
GUM_CONSTRUCTOR( HybridApproxInference );
}
template <typename GUM_SCALAR, template <typename> class APPROX>
HybridApproxInference<GUM_SCALAR, APPROX>::~HybridApproxInference() {
GUM_DESTRUCTOR(HybridApproxInference);
GUM_DESTRUCTOR( HybridApproxInference );
}
template <typename GUM_SCALAR, template <typename> class APPROX>
void HybridApproxInference<GUM_SCALAR, APPROX>::_makeInference(){
void HybridApproxInference<GUM_SCALAR, APPROX>::_makeInference() {
LoopyBeliefPropagation<GUM_SCALAR> lbp (&this->BN());
lbp.setMaxIter(DEFAULT_LBP_MAX_ITER);
lbp.makeInference();
LoopyBeliefPropagation<GUM_SCALAR> lbp( &this->BN() );
lbp.setMaxIter( DEFAULT_LBP_MAX_ITER );
lbp.makeInference();
const auto &bn = this->BN();
auto hardEv = this->hardEvidence();
auto hardEvNodes = this->hardEvidenceNodes();
const auto& bn = this->BN();
auto hardEv = this->hardEvidence();
auto hardEvNodes = this->hardEvidenceNodes();
if (!this->isContextualized)
this->contextualize();
if ( !this->isContextualized ) this->contextualize();
if (!this->isSetEstimator)
this->_setEstimatorFromLBP(&lbp, this->hardEvidenceNodes() );
if ( !this->isSetEstimator )
this->_setEstimatorFromLBP( &lbp, this->hardEvidenceNodes() );
this->initApproximationScheme();
gum::Instantiation Ip;
float w = .0;//
this->initApproximationScheme();
gum::Instantiation Ip;
float w = .0; //
// Burn in
Ip = this->_burnIn();
// Burn in
Ip = this->_burnIn();
do {
do {
Ip = this->_draw(&w, Ip, bn, hardEvNodes, hardEv);
this->__estimator.update(Ip,w);
this->updateApproximationScheme();
Ip = this->_draw( &w, Ip );
this->__estimator.update( Ip, w );
this->updateApproximationScheme();
} while(this->continueApproximationScheme(this->__estimator.confidence()));
this->isSetEstimator = false;
} while (
this->continueApproximationScheme( this->__estimator.confidence() ) );
this->isSetEstimator = false;
}
}
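Note: the do/while in _makeInference() keeps drawing until continueApproximationScheme() rejects the estimator's confidence value. A self-contained analogue of that stopping rule, halting a plain Monte Carlo mean once its 95% confidence half-width drops below a threshold (all constants and names are illustrative, not aGrUM's API):

#include <cmath>
#include <cstddef>
#include <iostream>
#include <random>

int main() {
  std::mt19937                gen(123);
  std::bernoulli_distribution draw(0.3);  // stand-in for one drawn sample

  double       sum = 0.0, sumSq = 0.0;
  std::size_t  n = 0;
  double       halfWidth = 1.0;           // the "confidence" in the loop above
  const double epsilon = 1e-3;

  do {
    double x = draw(gen) ? 1.0 : 0.0;
    sum += x;
    sumSq += x * x;
    ++n;
    if (n > 1) {
      double mean = sum / n;
      double var  = (sumSq - n * mean * mean) / (n - 1);
      halfWidth   = 1.96 * std::sqrt(var / n);  // 95% CI half-width
    }
    // a minimum sample count avoids stopping on a degenerate early estimate
  } while (n < 1000 || halfWidth > epsilon);

  std::cout << "estimate " << sum / n << " after " << n << " samples\n";
}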
@@ -32,91 +32,85 @@
namespace gum {
/**
* @class ImportanceInference importanceInference.h
*<agrum/BN/inference/importanceInference.h>
* @brief class for making Importance sampling inference in bayesian networks.
* @ingroup bn_approximation
*
* This class overrides pure function declared in the inherited class ApproximateInference.
* It defines the way Importance sampling draws a sample.
*
*/
template<typename GUM_SCALAR>
class ImportanceSampling : public ApproximateInference<GUM_SCALAR> {
/**
* @class ImportanceSampling importanceSampling.h
* <agrum/BN/inference/importanceSampling.h>
* @brief class for making importance sampling inference in Bayesian networks.
* @ingroup bn_approximation
*
* This class overrides the pure virtual functions declared in the inherited
* class ApproximateInference.
* It defines the way importance sampling draws a sample.
*
*/
template <typename GUM_SCALAR>
class ImportanceSampling : public ApproximateInference<GUM_SCALAR> {
public:
/**
* Default constructor
*/
ImportanceSampling( const IBayesNet<GUM_SCALAR>* BN );
/**
* Default constructor
*/
ImportanceSampling(const IBayesNet<GUM_SCALAR> *BN);
/**
* Destructor
*/
virtual ~ImportanceSampling();
/**
* Destructor
*/
virtual ~ImportanceSampling();
protected:
/// draws a defined number of samples without updating the estimators
virtual Instantiation _burnIn();
/// draws a defined number of samples without updating the estimators
virtual Instantiation _burnIn();
/// draws a sample according to Importance sampling
/**
* @param w the weight of sample being generated
* @param prev the previous sample generated
* @param bn the bayesian network containing the evidence
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* uses the Importance sampling method to generate a new sample using an
* evidence-mutilated Bayesian network.
* Each node added to the sample (in a topological order) has a weight.
* The sample's weight is the product of all weights.
*/
virtual Instantiation
_draw(float *w, Instantiation prev, const IBayesNet<GUM_SCALAR> &bn = BayesNet<GUM_SCALAR>(),
const NodeSet &hardEvNodes = NodeSet(), const NodeProperty<Idx> &hardEv = NodeProperty<Idx>());
/// modifies the cpts of a BN in order to tend to uniform distributions
/**
* @param bn a BN fragment on which we wish to modify CPTs
* @param epsilon a default parameter used to scale the modification of the distributions
*
* For every CPT in the BN, epsilon is added to each potential value before normalizing
*
*/
virtual void _unsharpenBN(BayesNetFragment<GUM_SCALAR> *bn, float epsilon = 1e-2);
///fired when Bayesian network is contextualized
/**
* @param bn the contextualized BayesNetFragment
* @param targets inference target variables
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Adds the target variables, erases the evidence variables and unsharpens the BN.
*
*/
virtual void
_onContextualize(BayesNetFragment<GUM_SCALAR> *bn, const NodeSet &targets, const NodeSet &hardEvNodes,
const NodeProperty<Idx> &hardEv);
};
extern template
class ImportanceSampling<float>;
extern template
class ImportanceSampling<double>;
/// draws a sample according to Importance sampling
/**
* @param w the weight of the sample being generated
* @param prev the previous sample generated
*
* Uses importance sampling to generate a new sample from an
* evidence-mutilated Bayesian network.
* Each node added to the sample (in topological order) carries a weight;
* the sample's weight is the product of these weights.
*/
virtual Instantiation _draw( float* w, Instantiation prev );
/// modifies the CPTs of a BN so that they tend toward uniform distributions
/**
* @param bn a BN fragment on which we wish to modify CPTs
* @param epsilon a default parameter used to scale the modification of the
* distributions
*
* For every CPT in the BN, epsilon is added to each potential value before
* normalizing
*
*/
virtual void _unsharpenBN( BayesNetFragment<GUM_SCALAR>* bn,
float epsilon = 1e-2 );
/// fired when Bayesian network is contextualized
/**
* @param bn the contextualized BayesNetFragment
* @param targets inference target variables
* @param hardEvNodes hard evidence nodes
* @param hardEv hard evidences values
*
* Adds the target variables, erases the evidence variables and unsharpens the
* BN.
*
*/
virtual void _onContextualize( BayesNetFragment<GUM_SCALAR>* bn );
};
extern template class ImportanceSampling<float>;
extern template class ImportanceSampling<double>;
}
#include <agrum/BN/inference/importanceSampling_tpl.h>
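Note: for reference, the weighting scheme this header describes, reduced to its core — sample from a proposal Q and weight each sample by P(x)/Q(x). Categorical P and Q stand in for the original and evidence-mutilated networks; none of this is aGrUM API:

#include <iostream>
#include <random>
#include <vector>

int main() {
  std::vector<double> P = {0.7, 0.2, 0.1};  // target distribution
  std::vector<double> Q = {0.4, 0.3, 0.3};  // proposal (must cover P's support)

  std::mt19937                    gen(1);
  std::discrete_distribution<int> sampleQ(Q.begin(), Q.end());

  double num = 0.0, den = 0.0;
  for (int i = 0; i < 100000; ++i) {
    int    x = sampleQ(gen);
    double w = P[x] / Q[x];  // per-sample weight; a product over nodes in a BN
    num += w * x;            // weighted statistic (here: E_P[X])
    den += w;                // self-normalization
  }
  std::cout << "E_P[X] ~ " << num / den << "  (exact 0.4)\n";
}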
......
@@ -56,12 +56,8 @@ namespace gum {
}
template <typename GUM_SCALAR>
Instantiation
ImportanceSampling<GUM_SCALAR>::_draw( float* w,
Instantiation prev,
const IBayesNet<GUM_SCALAR>& bn,
const NodeSet& hardEvNodes,
const NodeProperty<Idx>& hardEv ) {
Instantiation ImportanceSampling<GUM_SCALAR>::_draw( float* w,
Instantiation prev ) {
GUM_SCALAR pSurQ = 1.;
@@ -69,16 +65,15 @@ namespace gum {
prev.clear();
pSurQ = 1.;
for ( auto ev = hardEvNodes.beginSafe(); ev != hardEvNodes.endSafe();
++ev ) {
prev.add( bn.variable( *ev ) );
prev.chgVal( bn.variable( *ev ), hardEv[*ev] );
for ( auto ev : this->hardEvidenceNodes()) {
prev.add( BN().variable( ev ) );
prev.chgVal( BN().variable( ev ), this->evidence()[ev] );
}
for ( auto nod : this->BN().topologicalOrder() ) {
this->_addVarSample( nod, &prev, this->BN() );
auto probaQ = this->BN().cpt( nod ).get( prev );
auto probaP = bn.cpt( nod ).get( prev );
this->_addVarSample( nod, &prev);
auto probaQ = BN().cpt( nod ).get( prev );
auto probaP = _frag->cpt( nod ).get( prev );
if ( ( probaP == 0 ) || ( probaQ == 0 ) ) {
pSurQ = 0;
continue;
@@ -102,8 +97,6 @@ namespace gum {
void
ImportanceSampling<GUM_SCALAR>::_unsharpenBN( BayesNetFragment<GUM_SCALAR>* bn,
float epsilon ) {
GUM_CHECKPOINT;
for ( auto nod : bn->nodes().asNodeSet() ) {
Potential<GUM_SCALAR>* p = new Potential<GUM_SCALAR>();
*p = bn->cpt( nod ).isNonZeroMap().scale( epsilon ) + bn->cpt( nod );
@@ -114,31 +107,39 @@ namespace gum {
template <typename GUM_SCALAR>
void ImportanceSampling<GUM_SCALAR>::_onContextualize(
BayesNetFragment<GUM_SCALAR>* bn,
const NodeSet& targets,
const NodeSet& hardEvNodes,
const NodeProperty<Idx>& hardEv ) {
BayesNetFragment<GUM_SCALAR>* bn ) {
/*
Sequence<NodeId> sid;
for ( NodeSet::iterator ev = hardEvNodes.begin(); ev != hardEvNodes.end();
++ev )
sid << *ev;
*/
auto hardEvNodes = this->hardEvidenceNodes();
auto hardEv = this->hardEvidence();
auto targets = this->targets();
GUM_CHECKPOINT;
Sequence<NodeId> sid;
for ( NodeSet::iterator ev = hardEvNodes.begin(); ev != hardEvNodes.end();
++ev )
sid << *ev;
for ( Size i = 0; i < sid.size(); i++ ) {
bn->uninstallCPT( sid[i] );
bn->uninstallNode( sid[i] );
for ( auto ev : hardEvNodes ) {
GUM_CHECKPOINT;
bn->uninstallCPT( ev );
GUM_TRACE_VAR( ev );
GUM_TRACE_VAR( this->hardEvidenceNodes() );
bn->installCPT( ev, new Potential<GUM_SCALAR>( *this->evidence()[ev] ) );
GUM_CHECKPOINT;
// we keep the variables with hard evidence but alone
// bn->uninstallNode( sid[i] );
}
GUM_CHECKPOINT;
for ( auto targ = targets.begin(); targ != targets.end(); ++targ ) {
if ( this->BN().dag().exists( *targ ) ) this->addTarget( *targ );
}
GUM_CHECKPOINT;
auto minParam = bn->minNonZeroParam();
auto minAccepted = this->epsilon() / bn->maxVarDomainSize();
GUM_CHECKPOINT;
if ( minParam < minAccepted ) this->_unsharpenBN( bn, minAccepted );
}
}
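Note: _unsharpenBN() adds epsilon to every nonzero entry of each CPT (via isNonZeroMap) and renormalizes, pulling the distributions toward uniform without creating support where there was none. On one CPT column held in a plain vector, the operation looks like this (sketch only, not aGrUM code):

#include <vector>

// Add epsilon to every nonzero entry, then renormalize: the single-column
// analogue of  cpt.isNonZeroMap().scale(epsilon) + cpt  followed by
// normalization. Zero entries stay zero, so no new support is created.
void unsharpen(std::vector<double>& column, double epsilon) {
  double total = 0.0;
  for (double& v : column) {
    if (v != 0.0) v += epsilon;
    total += v;
  }
  for (double& v : column) v /= total;
}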
@@ -26,19 +26,17 @@
#ifndef GUM_ESTIMATOR_H
#define GUM_ESTIMATOR_H
#include <vector>
#include <agrum/BN/IBayesNet.h>
#include <agrum/core/hashTable.h>
#include <agrum/BN/inference/loopyBeliefPropagation.h>
#include <agrum/core/hashTable.h>
#include <vector>
namespace gum {
template <typename GUM_SCALAR>
class Estimator {
public:
template <typename GUM_SCALAR>
class Estimator {
public:
/**
* @class Estimator estimator.h <agrum/BN/inference/estimator.h>
* @brief class providing estimation tools for approximate inference
@@ -54,9 +52,9 @@ namespace gum {
/**
* Constructor with Bayesian Network