[aGrUM] refactoring close to the end

parent fe7d1c64
......@@ -95,11 +95,6 @@ namespace gum {
* Gibbs sampling.
*/
virtual Instantiation _monteCarloSample();
virtual void _onEvidenceAdded( const NodeId id, bool isHardEvidence );
virtual void _onEvidenceErased( const NodeId id, bool isHardEvidence );
virtual void _onAllEvidenceErased( bool contains_hard_evidence );
virtual void _onEvidenceChanged( const NodeId id, bool hasChangedSoftHard );
};
extern template class GibbsSampling<float>;
......
......@@ -69,6 +69,6 @@ namespace gum {
template < typename GUM_SCALAR >
Instantiation GibbsSampling< GUM_SCALAR >::_draw(float* w, Instantiation prev) {
*w = 1.0;
    return GibbsOperator< GUM_SCALAR >::nextSample(prev);
}
}
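For context on this hunk: Gibbs samples are unweighted, which is why _draw fixes *w = 1.0 and simply delegates to GibbsOperator::nextSample. That operator resamples one variable from its full conditional given its Markov blanket, i.e. the standard identity (background, not shown in this diff):

$$P(X_i \mid \mathrm{mb}(X_i)) \;\propto\; P(X_i \mid \mathrm{pa}(X_i)) \prod_{C \,\in\, \mathrm{ch}(X_i)} P\big(C \mid \mathrm{pa}(C)\big).$$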
......@@ -77,8 +77,7 @@ namespace gum {
*
*/
    virtual Instantiation _draw(float* w, Instantiation prev);
};
......
......@@ -57,12 +57,8 @@ namespace gum {
template < typename GUM_SCALAR >
  Instantiation MonteCarloSampling< GUM_SCALAR >::_draw(float* w,
                                                        Instantiation prev) {
*w = 1.;
bool wrong_value = false;
......@@ -78,7 +74,7 @@ namespace gum {
}
}
} while (wrong_value);
GUM_TRACE(prev);
return prev;
}
}
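The do/while tail above closes a rejection loop: forward sampling restarts whenever the drawn sample contradicts a hard-evidence value. A sketch of how such a body can be written, using only members that appear elsewhere in this commit; this is a reconstruction of the elided lines, not the actual hunk content:

  // Sketch (NOT the library code): plain forward sampling with rejection of
  // samples that contradict hard evidence.
  template < typename GUM_SCALAR >
  Instantiation MonteCarloSampling< GUM_SCALAR >::_draw(float* w,
                                                        Instantiation prev) {
    *w = 1.;  // Monte Carlo samples are unweighted
    bool wrong_value = false;

    do {
      wrong_value = false;
      prev.clear();

      for (auto nod : this->BN().topologicalOrder()) {
        this->_addVarSample(nod, &prev);  // draw nod given its sampled parents

        if (this->hardEvidenceNodes().contains(nod)
            && prev.val(this->BN().variable(nod)) != this->hardEvidence()[nod]) {
          wrong_value = true;  // sample contradicts evidence: reject and restart
          break;
        }
      }
    } while (wrong_value);

    return prev;
  }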
......@@ -19,7 +19,8 @@
***************************************************************************/
/**
* @file
* @brief This file implements a hybrid sampling class using LoopyBeliefPropagation
* and an approximate inference method.
*
* @author Paul ALAM & Pierre-Henri WUILLEMIN
......@@ -27,60 +28,62 @@
#ifndef GUM_HYBRID_INFERENCE_H
#define GUM_HYBRID_INFERENCE_H
#include <agrum/BN/inference/GibbsSampling.h>
#include <agrum/BN/inference/MonteCarloSampling.h>
#include <agrum/BN/inference/importanceSampling.h>
#include <agrum/BN/inference/weightedSampling.h>
namespace gum {
  /**
   * @class HybridApproxInference hybridApproxInference.h
   * <agrum/BN/inference/hybridApproxInference.h>
   * @brief class for making hybrid sampling inference with loopy belief
   * propagation and an approximation inference method in Bayesian networks.
   * @ingroup bn_approximation
   *
   * This class inherits from the template class APPROX, which SHOULD be one of
   * the four approximate inference methods (MonteCarlo, Weighted, Importance,
   * Gibbs). It performs the inference with the inherited class' method, after
   * having initialized the estimators with the posteriors obtained by running
   * the LoopyBeliefPropagation algorithm.
   */
  template < typename GUM_SCALAR, template < typename > class APPROX >
  class HybridApproxInference : public APPROX< GUM_SCALAR > {

    public:
    /**
     * Default constructor
     */
    HybridApproxInference(const IBayesNet< GUM_SCALAR >* bn);

    /**
     * destructor
     */
    virtual ~HybridApproxInference();

    /// makes the inference by generating samples w.r.t. the mother class'
    /// sampling method after initializing the estimators with loopy belief
    /// propagation
    virtual void _makeInference();
  };

  extern template class HybridApproxInference< float, WeightedSampling >;
  extern template class HybridApproxInference< double, WeightedSampling >;

  extern template class HybridApproxInference< float, ImportanceSampling >;
  extern template class HybridApproxInference< double, ImportanceSampling >;

  extern template class HybridApproxInference< float, MonteCarloSampling >;
  extern template class HybridApproxInference< double, MonteCarloSampling >;

  extern template class HybridApproxInference< float, GibbsSampling >;
  extern template class HybridApproxInference< double, GibbsSampling >;
}
#include <agrum/BN/inference/hybridApproxInference_tpl.h>
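For orientation, a hedged usage sketch of the hybrid scheme with GibbsSampling as the APPROX parameter; makeInference and posterior are the usual aGrUM inference entry points, and the variable name "h" is hypothetical:

  #include <agrum/BN/BayesNet.h>
  #include <agrum/BN/inference/hybridApproxInference.h>
  #include <iostream>

  template < typename GUM_SCALAR >
  void queryWithHybridGibbs(const gum::BayesNet< GUM_SCALAR >& bn) {
    // LBP initializes the estimators, then Gibbs sampling refines them.
    gum::HybridApproxInference< GUM_SCALAR, gum::GibbsSampling > hybrid(&bn);
    hybrid.makeInference();
    std::cout << hybrid.posterior(bn.idFromName("h")) << std::endl;
  }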
......
......@@ -99,14 +99,12 @@ namespace gum {
template < typename GUM_SCALAR >
const Potential< GUM_SCALAR >&
ApproximateInference< GUM_SCALAR >::_posterior(const NodeId id) {
return __estimator.posterior(this->BN().variable(id));
}
template < typename GUM_SCALAR >
const Potential< GUM_SCALAR >&
ApproximateInference< GUM_SCALAR >::_posterior(const std::string& name) {
return _posterior(this->BN().idFromName(name));
}
......@@ -129,24 +127,25 @@ namespace gum {
dSeparation dsep = gum::dSeparation();
NodeSet requisite;
    dsep.requisiteNodes(
      this->BN().dag(),
      this->BN().nodes().asNodeSet(),  // no target for approximateInference
      this->hardEvidenceNodes(),
      this->softEvidenceNodes(),  // should be empty
      requisite);
    requisite += this->hardEvidenceNodes();
    auto nonRequisite = this->BN().dag().asNodeSet() - requisite;
for (auto elmt : nonRequisite)
__samplingBN->uninstallNode(elmt);
GUM_TRACE(__samplingBN->toDot());
for (auto hard : this->hardEvidenceNodes()) {
gum::Instantiation I;
I.add(this->BN().variable(hard));
I.chgVal(this->BN().variable(hard), this->hardEvidence()[hard]);
      for (const auto& child : this->BN().dag().children(hard)) {
gum::Potential< GUM_SCALAR >* p = new gum::Potential< GUM_SCALAR >();
*p = this->BN().cpt(child).extract(I);
__samplingBN->installCPT(child, p);
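Restating this hunk in symbols: the sampling BN keeps only the requisite nodes of the all-targets query, and each child c of a hard-evidence node e observed at value v_e gets its CPT replaced by the slice

$$\tilde P\big(c \mid \mathrm{pa}(c) \setminus \{e\}\big) \;=\; P\big(c \mid \mathrm{pa}(c) \setminus \{e\},\; e = v_e\big),$$

so e itself never has to be sampled.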
......@@ -178,7 +177,8 @@ namespace gum {
Ip = this->_draw(&w, Ip);
__estimator.update(Ip, w);
updateApproximationScheme();
std::cout << Ip << __estimator.posterior(this->BN().variableFromName("h"))
<< " " << __estimator.confidence() << std::endl;
} while (continueApproximationScheme(__estimator.confidence()));
this->isSetEstimator = false;
......@@ -189,10 +189,12 @@ namespace gum {
void ApproximateInference< GUM_SCALAR >::_addVarSample(NodeId nod,
Instantiation* I) {
gum::Instantiation Itop = gum::Instantiation(samplingBN().cpt(nod));
Itop.forgetMaster();
Itop.erase(samplingBN().variable(nod));
I->add(samplingBN().variable(nod));
GUM_TRACE(samplingBN().cpt(nod).extract(Itop));
I->chgVal(samplingBN().variable(nod),
              samplingBN().cpt(nod).extract(Itop).draw());
}
}
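In forward-sampling terms, _addVarSample is meant to draw node X from the one-dimensional conditional obtained by fixing its parents to the values already present in the sample,

$$x \sim P\big(X \mid \mathrm{pa}(X) = i_{\mathrm{pa}(X)}\big),$$

and then to store x in I via chgVal (the GUM_TRACE suggests this path is still being debugged in this commit).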
......@@ -144,7 +144,7 @@ namespace gum {
private:
/// the set of single posteriors computed during the last inference
    /** the posteriors are owned by the Estimator. */
    HashTable<std::string, Potential<GUM_SCALAR>*> __target_posteriors;
};
extern template class Estimator<float>;
......
......@@ -27,164 +27,164 @@
namespace gum {
  template < typename GUM_SCALAR >
  Estimator< GUM_SCALAR >::Estimator() {
    GUM_CONSTRUCTOR(Estimator);
    _wtotal = (GUM_SCALAR)0.;
    _ntotal = 0;
    _bn = nullptr;
  }

  template < typename GUM_SCALAR >
  Estimator< GUM_SCALAR >::Estimator(const IBayesNet< GUM_SCALAR >* bn)
      : Estimator() {
    _bn = bn;

    for (gum::NodeGraphPartIterator iter = bn->nodes().begin();
         iter != bn->nodes().end();
         ++iter)
      _estimator.insert(
        bn->variable(*iter).name(),
        std::vector< GUM_SCALAR >(bn->variable(*iter).domainSize(), 0.0));

    GUM_CONSTRUCTOR(Estimator);
  }
  template < typename GUM_SCALAR >
  INLINE Estimator< GUM_SCALAR >::~Estimator() {
    GUM_DESTRUCTOR(Estimator);
    // remove all the posteriors computed
    for (const auto& pot : __target_posteriors)
      delete pot.second;
  }
  /* adds all potential target variables from a given BN to the Estimator */
  template < typename GUM_SCALAR >
  void Estimator< GUM_SCALAR >::setFromBN(const IBayesNet< GUM_SCALAR >* bn,
                                          const NodeSet& hardEvidence) {
    for (gum::NodeGraphPartIterator iter = bn->nodes().begin();
         iter != bn->nodes().end();
         ++iter) {
      auto v = bn->variable(*iter).name();

      if (!hardEvidence.contains(*iter)) {
        if (_estimator.exists(v))
          _estimator[v] = std::vector< GUM_SCALAR >(
            bn->variable(*iter).domainSize(), (GUM_SCALAR)0.0);
        else
          _estimator.insert(v,
                            std::vector< GUM_SCALAR >(
                              bn->variable(*iter).domainSize(), (GUM_SCALAR)0.0));
      }
    }
  }
  /// we multiply the posteriors obtained by LoopyBeliefPropagation by its
  /// number of iterations
  template < typename GUM_SCALAR >
  void
  Estimator< GUM_SCALAR >::setFromLBP(LoopyBeliefPropagation< GUM_SCALAR >* lbp,
                                      const NodeSet& hardEvidence) {
    for (gum::NodeGraphPartIterator iter = lbp->BN().nodes().begin();
         iter != lbp->BN().nodes().end();
         ++iter) {
      if (!hardEvidence.contains(*iter)) {
        std::vector< GUM_SCALAR > v;
        auto p = lbp->posterior(*iter);
        gum::Instantiation inst(p);

        for (inst.setFirst(); !inst.end(); ++inst) {
          v.push_back(p[inst] * lbp->nbrIterations());
        }

        _estimator.insert(lbp->BN().variable(*iter).name(), v);
      }
    }
  }
  /* update the Estimator given an instantiation I with weight bias w */
  template < typename GUM_SCALAR >
  void Estimator< GUM_SCALAR >::update(Instantiation I, GUM_SCALAR w) {
    _wtotal += w;
    _ntotal += 1;

    for (Idx i = 0; i < I.nbrDim(); i++) {
      if (_estimator.exists(I.variable(i).name()))
        _estimator[I.variable(i).name()][I.val(i)] += w;
    }
  }
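Taken together with EV() and posterior() below, update() implements the weighted estimate: for each variable X and value x, the estimator accumulates the total weight of the samples where X = x, and the posterior estimate is the normalized ratio

$$\hat P(X = x) \;=\; \frac{\sum_t w_t \, \mathbf{1}[I_t(X) = x]}{\sum_t w_t}.$$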
  /* returns the approximate posterior of a variable */
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
  Estimator< GUM_SCALAR >::posterior(const DiscreteVariable& var) {
    Potential< GUM_SCALAR >* p = nullptr;

    if (!_estimator.exists(var.name()))
      GUM_ERROR(NotFound, "Target variable not found");

    // check if we have already computed the posterior
    if (__target_posteriors.exists(var.name())) {
      p = __target_posteriors[var.name()];
    } else {
      p = new Potential< GUM_SCALAR >();
      *p << var;
      __target_posteriors.insert(var.name(), p);
    }

    p->fillWith(_estimator[var.name()]);
    p->normalize();
    return *p;
  }
  /* expected value considering a Bernoulli variable with parameter val */
  template < typename GUM_SCALAR >
  GUM_SCALAR Estimator< GUM_SCALAR >::EV(std::string name, int val) {
    return _estimator[name][val] / _wtotal;
  }

  /* variance considering a Bernoulli variable with parameter val */
  template < typename GUM_SCALAR >
  GUM_SCALAR Estimator< GUM_SCALAR >::variance(std::string name, int val) {
    GUM_SCALAR p = EV(name, val);
    return p * (1 - p);
  }
  /* returns the maximum confidence-interval length over all variables and all
   * their values */
  template < typename GUM_SCALAR >
  GUM_SCALAR Estimator< GUM_SCALAR >::confidence() {
    GUM_SCALAR ic_max = 0;

    for (auto iter = _estimator.begin(); iter != _estimator.end(); ++iter) {
      for (Size i = 0; i < iter.val().size(); i++) {
        GUM_SCALAR ic = 2 * 1.96 * sqrt(variance(iter.key(), i) / (_ntotal - 1));
        if (ic > ic_max) ic_max = ic;
      }
    }
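The stopping statistic computed above is the full width of an approximate 95% confidence interval for a Bernoulli proportion (1.96 is the 0.975 normal quantile), maximized over all variables and all their values:

$$\mathrm{ic} \;=\; 2 \times 1.96 \,\sqrt{\frac{\hat p\,(1 - \hat p)}{n - 1}}, \qquad \mathrm{ic\_max} \;=\; \max_{X,\,x} \mathrm{ic}(X, x).$$

The approximation scheme stops once this maximum falls below its threshold (see continueApproximationScheme(__estimator.confidence()) earlier in this commit).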
......
......@@ -32,61 +32,54 @@
namespace gum {
  /**
   * @class WeightedSampling weightedSampling.h
   * <agrum/BN/inference/weightedSampling.h>
   * @brief class for making Weighted sampling inference in Bayesian networks.
   * @ingroup bn_approximation
   *
   * This class overrides the pure functions declared in the inherited class
   * ApproximateInference.
   * It defines the way Weighted sampling draws a sample.
   */
  template < typename GUM_SCALAR >
  class WeightedSampling : public ApproximateInference< GUM_SCALAR > {
public:
    /**
     * Default constructor
     */
    WeightedSampling(const IBayesNet< GUM_SCALAR >* BN);
    /**
     * Destructor
     */
    virtual ~WeightedSampling();
protected:
    /// draws a defined number of samples without updating the estimators
    virtual Instantiation _burnIn();
    /// draws a sample according to Weighted sampling
    /**
     * @param w the weight of the sample being generated
     * @param prev the previous sample generated
     *
     * Generates a new sample in topological order. Each sample has a weight
     * bias. The sample weight is the product of each node's weight.
     */
    virtual Instantiation _draw(float* w, Instantiation prev);
  };

  extern template class WeightedSampling< float >;
  extern template class WeightedSampling< double >;
}
#include <agrum/BN/inference/weightedSampling_tpl.h>
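A hedged usage sketch for WeightedSampling; addEvidence, makeInference and posterior are the usual aGrUM inference entry points, and the node names "e" and "h" are hypothetical:

  #include <agrum/BN/BayesNet.h>
  #include <agrum/BN/inference/weightedSampling.h>
  #include <iostream>

  template < typename GUM_SCALAR >
  void queryWithWeightedSampling(const gum::BayesNet< GUM_SCALAR >& bn) {
    gum::WeightedSampling< GUM_SCALAR > inf(&bn);
    inf.addEvidence(bn.idFromName("e"), 1);  // hard evidence e = 1 (hypothetical)
    inf.makeInference();                     // likelihood-weighted sampling
    std::cout << inf.posterior(bn.idFromName("h")) << std::endl;
  }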
......
......@@ -31,74 +31,60 @@
namespace gum {
  /// Default constructor
  template < typename GUM_SCALAR >
  WeightedSampling< GUM_SCALAR >::WeightedSampling(
    const IBayesNet< GUM_SCALAR >* BN)
      : ApproximateInference< GUM_SCALAR >(BN) {
    this->setBurnIn(0);
    GUM_CONSTRUCTOR(WeightedSampling);
  }
  /// Destructor
  template < typename GUM_SCALAR >
  WeightedSampling< GUM_SCALAR >::~WeightedSampling() {
    GUM_DESTRUCTOR(WeightedSampling);
  }
  /// No burn-in needed for Weighted sampling
  template < typename GUM_SCALAR >
  Instantiation WeightedSampling< GUM_SCALAR >::_burnIn() {
    gum::Instantiation I;
    return I;
  }
  template < typename GUM_SCALAR >
  Instantiation WeightedSampling< GUM_SCALAR >::_draw(float* w,
                                                      Instantiation prev) {
    *w = 1.;
    bool wrongValue = false;

    do {
      prev.clear();
      wrongValue = false;
      *w = 1.;

      for (auto nod : this->BN().topologicalOrder()) {
        if (this->hardEvidenceNodes().contains(nod)) {
          prev.add(this->BN().variable(nod));
          prev.chgVal(this->BN().variable(nod), this->hardEvidence()[nod]);
          auto localp = this->BN().cpt(nod).get(prev);

          if (localp == 0) {
            wrongValue = true;
            break;
          }

          *w *= localp;
          continue;
        }

        this->_addVarSample(nod, &prev);
      }
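As the loop shows, evidence nodes are clamped to their observed values and contribute their local probability to the weight, so a completed sample carries the standard likelihood-weighting weight

$$w \;=\; \prod_{e \in E} P\big(e = v_e \mid \mathrm{pa}(e) = i_{\mathrm{pa}(e)}\big),$$

and a sample that hits a zero local probability (wrongValue) is discarded and redrawn.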