00001 #ifndef _MCMC_HPP
00002 #define _MCMC_HPP
00003
#include <cassert>
#include <cmath>

#include "properties.hpp"
#include "random.hpp"
00006
00007 namespace Arak {
00008
00012 template <class MarkovChainTraits>
00013 class MarkovChain {
00014
00015 public:
00016
00017 typedef typename MarkovChainTraits::StateType StateType;
00018 typedef typename MarkovChainTraits::DistributionType DistributionType;
00019 typedef typename MarkovChainTraits::ProposalType ProposalType;
00020 typedef typename MarkovChainTraits::MoveType MoveType;
00021
00022 protected:
00023
00027 Arak::Util::Random& random;
00028
00032 DistributionType& dist;
00033
00037 ProposalType& proposal;
00038
00042 StateType& state;
00043
00047 double logLikelihood;
00048
00052 unsigned long int numSteps;
00053
00057 unsigned long int numMoves;
00058
00062 unsigned int burnIn;
00063
00067 unsigned int stride;
00068
00069 public:
00070
00083 MarkovChain(DistributionType& dist,
00084 ProposalType& proposal,
00085 StateType& state,
00086 const Arak::Util::PropertyMap& props,
00087 Arak::Util::Random& random = Arak::Util::default_random)
00088 : random(random),
00089 dist(dist),
00090 proposal(proposal),
00091 state(state),
00092 logLikelihood(dist.logLikelihood(state)),
00093 numSteps(0),
00094 numMoves(0),
00095 burnIn(0),
00096 stride(0) {
00097 using namespace Arak::Util;
00098
00099 if (hasp(props, "arak.mcmc.estimation.burn_in"))
00100 assert(parse(getp(props, "arak.mcmc.estimation.burn_in"), burnIn));
00101 if (hasp(props, "arak.mcmc.estimation.stride"))
00102 assert(parse(getp(props, "arak.mcmc.estimation.stride"), stride));
00103 }
00104
00111 const StateType& getState() const { return state; }
00112
00116 double getLogLikelihood() const { return logLikelihood; }
00117
00124 StateType& getState() { return state; }
00125
00131 const DistributionType& getDistribution() const {
00132 return dist;
00133 }
00134
00141 virtual bool advance(){
00142 numSteps++;
00143
00144 proposal.sample(state, random, true);
00145 MoveType& move = proposal.move();
00146
00147 double pll = proposal.ll(state);
00148 double rpll = proposal.rll(state);
00149
00150 double llh = dist.logLikelihood(state);
00151
00152 move.execute(state);
00153 double llh_new = dist.logLikelihood(state);
00154
00155 double mh_log_ratio = (llh_new + rpll) - (llh + pll);
00156 double mh_ratio = exp(mh_log_ratio);
00157
00158 if ((mh_ratio < 1.0) && (random.uniform() > mh_ratio)) {
00159 move.undo(state);
00160 proposal.result(false);
00161 return false;
00162 }
00163
00164 numMoves++;
00165 logLikelihood = dist.logLikelihood(state);
00166 proposal.result(true);
00167 return true;
00168 }
00169
00173 unsigned long int getNumSteps() const { return numSteps; }
00174
00180 unsigned long int getNumSamples() const {
00181 unsigned long int s = (numSteps / (stride + 1));
00182 return (s > burnIn) ? s - burnIn : 0;
00183 }
00184
00189 bool isSample() const {
00190 return (getNumSamples() > burnIn) &&
00191 ((numSteps % (stride + 1)) == 0);
00192 }
00193
00198 unsigned long int getNumMoves() const { return numMoves; }
00199
00203 double acceptance() const { return double(numMoves) / double(numSteps); }
00204
00205 };
00206
00210 template <class MarkovChainTraits>
00211 class AnnealedMarkovChain : public MarkovChain<MarkovChainTraits> {
00212
00213 public:
00214
00215 typedef typename MarkovChain<MarkovChainTraits>::StateType StateType;
00216 typedef typename MarkovChain<MarkovChainTraits>::DistributionType DistributionType;
00217 typedef typename MarkovChain<MarkovChainTraits>::ProposalType ProposalType;
00218 typedef typename MarkovChain<MarkovChainTraits>::MoveType MoveType;
00219
00220 protected:
00221
00225 unsigned long int duration;
00226
00230 double cold;
00231
00241 double beta1;
00242
00252 double beta2;
00253
00263 double theta;
00264
00265 public:
00266
00287 AnnealedMarkovChain(DistributionType& dist,
00288 ProposalType& proposal,
00289 StateType& state,
00290 const Arak::Util::PropertyMap& props,
00291 Arak::Util::Random& random =
00292 Arak::Util::default_random)
00293 : MarkovChain<MarkovChainTraits>(dist, proposal, state, props, random) {
00294 using namespace Arak::Util;
00295
00296 if (hasp(props, "arak.mcmc.annealing.hot")) {
00297 double hot;
00298 assert(parse(getp(props, "arak.mcmc.annealing.hot"), hot));
00299 assert(parse(getp(props, "arak.mcmc.annealing.cold"), cold));
00300 assert(parse(getp(props, "arak.mcmc.annealing.duration"), duration));
00301
00302 if (hot != cold) {
00303 theta = (ln(hot) - ln(cold)) / ln(duration);
00304 double power = pow(double(duration), theta);
00305 beta1 = (cold - hot / power) * (1.0 - 1.0 / power);
00306 beta2 = hot - beta1;
00307 } else {
00308 beta1 = 0.0;
00309 beta2 = 1.0;
00310 theta = 0.0;
00311 }
00312 } else {
00313 beta1 = 1.0;
00314 beta2 = 0.0;
00315 theta = 0.0;
00316 }
00317 }
00318
00326 virtual bool advance(){
00327 numSteps++;
00328
00329 proposal.sample(state, random, false);
00330 MoveType& move = proposal.move();
00331
00332 double temp;
00333 if (numSteps > duration)
00334 temp = cold;
00335 else
00336 temp = beta1 + beta2 / pow(double(numSteps), theta);
00337
00338 double pll = proposal.ll(state);
00339 double rpll = proposal.rll(state);
00340
00341 double llh = dist.logLikelihood(state, temp);
00342
00343 move.execute(state);
00344 double llh_new = dist.logLikelihood(state, temp);
00345
00346 double mh_log_ratio = (llh_new + rpll) - (llh + pll);
00347 double mh_ratio = exp(mh_log_ratio);
00348
00349 if ((mh_ratio < 1.0) && (random.uniform() > mh_ratio)) {
00350 move.undo(state);
00351 proposal.result(false);
00352 return false;
00353 }
00354
00355 numMoves++;
00356 logLikelihood = dist.logLikelihood(state);
00357 proposal.result(true);
00358 return true;
00359 }
00360
00361 };
00362
00366 template <class MarkovChainTraits>
00367 class StochasticHillClimber : public MarkovChain<MarkovChainTraits> {
00368
00369 public:
00370
00371 typedef typename MarkovChain<MarkovChainTraits>::StateType StateType;
00372 typedef typename MarkovChain<MarkovChainTraits>::DistributionType DistributionType;
00373 typedef typename MarkovChain<MarkovChainTraits>::ProposalType ProposalType;
00374 typedef typename MarkovChain<MarkovChainTraits>::MoveType MoveType;
00375
00388 StochasticHillClimber(DistributionType& dist,
00389 ProposalType& proposal,
00390 StateType& state,
00391 const Arak::Util::PropertyMap& props,
00392 Arak::Util::Random& random = Arak::Util::default_random)
00393 : MarkovChain<MarkovChainTraits>(dist, proposal, state, props, random) { }
00394
00402 virtual bool advance(){
00403 numSteps++;
00404
00405 proposal.sample(state, random, false);
00406 MoveType& move = proposal.move();
00407
00408 move.execute(state);
00409 double llh_new = dist.logLikelihood(state);
00410
00411 if (llh_new <= logLikelihood) {
00412 move.undo(state);
00413 proposal.result(false);
00414 return false;
00415 }
00416
00417 numMoves++;
00418 logLikelihood = llh_new;
00419 proposal.result(true);
00420 return true;
00421 }
00422
00423 };
00424
00425 }
00426
00427 #endif