#include <stan/model/model_header.hpp>
#include <Rcpp.h>
#include <RcppEigen.h>
class sparselm_stan {
public: // these would ordinarily be private in the C++ code generated by Stan
  Eigen::Map<Eigen::SparseMatrix<double> > X;
  Eigen::VectorXd y;

  sparselm_stan(Eigen::Map<Eigen::SparseMatrix<double> > X, Eigen::VectorXd y) :
    X(X), y(y) {}

  template <bool propto__ = false, bool jacobian__ = false, typename T__ = double>
  // propto__ is usually true but causes log_prob() to return 0 when called from R
  // jacobian__ is usually true for MCMC but typically is false for optimization
  T__ log_prob(std::vector<T__>& params_r__) const {
    using namespace stan::math;
    T__ lp__(0.0);
    accumulator<T__> lp_accum__;
    // set up model parameters
    std::vector<int> params_i__;
    stan::io::deserializer<T__> in__(params_r__, params_i__);
    auto beta = in__.template read<Eigen::Matrix<T__,-1,1> >(X.cols());
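    // sigma enters on the unconstrained scale; read_constrain_lb() applies the
    // lower-bound transform and, when jacobian__ is true, adds the log absolute
    // Jacobian of that transform to lp__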
    auto sigma = in__.template read_constrain_lb<T__, jacobian__>(0, lp__);
    // log-likelihood (should add priors)
    lp_accum__.add(lp__);
    lp_accum__.add(normal_lpdf<propto__>(y, (X * beta).eval(), sigma));
    return lp_accum__.sum();
  }
  template <bool propto__ = false, bool jacobian__ = false>
  std::vector<double> gradient(std::vector<double>& params_r__) const {
    // Calculate gradients using reverse-mode autodiff
    // although you could do them analytically in this case
    using std::vector;
    using stan::math::var;
    double lp;
    std::vector<double> gradient;
    try {
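      // copying the parameters into vars records them on the autodiff stack,
      // so every operation in log_prob() builds the expression graph that
      // reverse-mode differentiation traverses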
      vector<var> ad_params_r(params_r__.size());
      for (size_t i = 0; i < params_r__.size(); ++i) {
        var var_i(params_r__[i]);
        ad_params_r[i] = var_i;
      }
      var adLogProb
        = this->log_prob<propto__, jacobian__>(ad_params_r);
      lp = adLogProb.val();
      adLogProb.grad(ad_params_r, gradient);
    } catch (const std::exception &ex) {
      stan::math::recover_memory();
      throw;
    }
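    // free the memory held by the autodiff stack now that the gradient
    // has been extracted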
    stan::math::recover_memory();
    return gradient;
  }
};
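
A minimal sketch of how a class like this could be exposed to R via an Rcpp module, assuming the listing above is saved as sparselm_stan.hpp and compiled with Rcpp::sourceCpp(); the module name, the C++ standard plugin, and the exact set of // [[Rcpp::depends()]] attributes are illustrative assumptions and may need adjusting to your StanHeaders version:

// sparselm_module.cpp -- illustrative sketch, not part of the header above
// [[Rcpp::plugins(cpp17)]]
// [[Rcpp::depends(BH)]]
// [[Rcpp::depends(RcppEigen)]]
// [[Rcpp::depends(RcppParallel)]]
// [[Rcpp::depends(StanHeaders)]]
#include "sparselm_stan.hpp"

RCPP_MODULE(sparselm) {
  Rcpp::class_<sparselm_stan>("sparselm_stan")
    // constructor arguments: sparse design matrix X and outcome vector y
    .constructor<Eigen::Map<Eigen::SparseMatrix<double> >, Eigen::VectorXd>()
    // instantiate the member templates with their defaults
    // (propto__ = false, jacobian__ = false)
    .method("log_prob", &sparselm_stan::log_prob<>)
    .method("gradient", &sparselm_stan::gradient<>)
  ;
}

From R, one would then construct an object with something like new(sparselm_stan, X, y), where X is a sparse matrix (e.g. a dgCMatrix from the Matrix package) and y a numeric vector, and call its log_prob() and gradient() methods on the unconstrained parameter vector, i.e. beta followed by log(sigma).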