@q $Id$ @>
@q Copyright 2007, Ondra Kamenik @>
@ Start of {\tt normal\_conjugate.cpp} file.
@c
#include "normal_conjugate.h"
#include "kord_exception.h"
@<|NormalConj| diffuse prior constructor@>;
@<|NormalConj| data update constructor@>;
@<|NormalConj| copy constructor@>;
@<|NormalConj::update| one observation code@>;
@<|NormalConj::update| multiple observations code@>;
@<|NormalConj::update| with |NormalConj| code@>;
@<|NormalConj::getVariance| code@>;
@ The diffuse prior sets $\mu=0$, $\kappa=0$, $\nu=-1$ and $\Lambda=0$.
@<|NormalConj| diffuse prior constructor@>=
NormalConj::NormalConj(int d)
  : mu(d), kappa(0), nu(-1), lambda(d,d)
{
  mu.zeros();
  lambda.zeros();
}
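@ A minimal usage sketch (the dimension~3 and the vector |y| are
hypothetical, not part of this file): |NormalConj post(3);| creates the
diffuse prior for a 3-dimensional process, and |post.update(y);| then
turns it into the posterior after the single observation |y|.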
@ The constructor computes the posterior coming from the diffuse prior
and the data in |ydata|, which has one observation per column: $\mu$ is
set to the sample mean of the columns, $\Lambda$ to the sum of the outer
products of their deviations from that mean, $\kappa$ to the number of
observations, and $\nu$ to $\kappa-1$.
@<|NormalConj| data update constructor@>=
NormalConj::NormalConj(const ConstTwoDMatrix& ydata)
  : mu(ydata.numRows()), kappa(ydata.numCols()), nu(ydata.numCols()-1),
    lambda(ydata.numRows(), ydata.numRows())
{
  mu.zeros();
  for (int i = 0; i < ydata.numCols(); i++)
    mu.add(1.0/ydata.numCols(), ConstVector(ydata, i));

  lambda.zeros();
  for (int i = 0; i < ydata.numCols(); i++) {
    Vector diff(ConstVector(ydata, i));
    diff.add(-1, mu);
    lambda.addOuter(diff);
  }
}
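@ For instance, for two observations $y_1$ and $y_2$ the constructor
yields
$$\mu={1\over2}(y_1+y_2),\qquad \kappa=2,\qquad \nu=1,\qquad
\Lambda={1\over2}(y_1-y_2)(y_1-y_2)^T,$$
since both deviations from the mean equal $\pm{1\over2}(y_1-y_2)$.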
@
@<|NormalConj| copy constructor@>=
NormalConj::NormalConj(const NormalConj& nc)
  : mu(nc.mu), kappa(nc.kappa), nu(nc.nu), lambda(nc.lambda)
{
}
@ The method performs the following:
$$\eqalign{
\mu_1 = &\; {\kappa_0\over \kappa_0+1}\mu_0 + {1\over \kappa_0+1}y\cr
\kappa_1 = &\; \kappa_0 + 1\cr
\nu_1 = &\; \nu_0 + 1\cr
\Lambda_1 = &\; \Lambda_0 + {\kappa_0\over\kappa_0+1}(y-\mu_0)(y-\mu_0)^T,
}$$
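For instance, an update of the diffuse prior ($\mu_0=0$, $\kappa_0=0$,
$\nu_0=-1$, $\Lambda_0=0$) by the first observation gives $\mu_1=y$,
$\kappa_1=1$, $\nu_1=0$ and $\Lambda_1=0$, since the weight
$\kappa_0/(\kappa_0+1)$ vanishes.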
@<|NormalConj::update| one observation code@>=
void NormalConj::update(const ConstVector& y)
{
  KORD_RAISE_IF(y.length() != mu.length(),
                "Wrong length of a vector in NormalConj::update");

  // the outer product in the formula uses the old mean, so form
  // y-mu_0 before mu is overwritten
  Vector diff(y);
  diff.add(-1, mu);
  lambda.addOuter(diff, kappa/(1.0+kappa));

  mu.mult(kappa/(1.0+kappa));
  mu.add(1.0/(1.0+kappa), y);

  kappa++;
  nu++;
}
@ The method evaluates the formula in the header file.
@<|NormalConj::update| multiple observations code@>=
void NormalConj::update(const ConstTwoDMatrix& ydata)
{
  NormalConj nc(ydata);
  update(nc);
}
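@ Note that, up to rounding errors, updating by |ydata| at once is the
same as updating by its columns one by one, as in the following sketch
(the object |nc| and the matrix |ydata| are hypothetical):
|for (int i = 0; i < ydata.numCols(); i++) nc.update(ConstVector(ydata, i));|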
@ The method merges the posterior in |nc|, which stands for |nc.kappa|
additional observations, into the current one. Writing
$(\mu_0,\kappa_0,\nu_0,\Lambda_0)$ for the current state and
$(\mu_1,\kappa_1,\nu_1,\Lambda_1)$ for |nc|, it performs
$$\eqalign{
\mu = &\; {\kappa_0\over \kappa_0+\kappa_1}\mu_0 +
{\kappa_1\over \kappa_0+\kappa_1}\mu_1\cr
\kappa = &\; \kappa_0 + \kappa_1\cr
\nu = &\; \nu_0 + \kappa_1\cr
\Lambda = &\; \Lambda_0 + \Lambda_1 +
{\kappa_0\kappa_1\over\kappa_0+\kappa_1}(\mu_1-\mu_0)(\mu_1-\mu_0)^T.
}$$
@<|NormalConj::update| with |NormalConj| code@>=
void NormalConj::update(const NormalConj& nc)
{
  KORD_RAISE_IF(nc.getDim() != getDim(),
                "Wrong dimension of an update in NormalConj::update");

  // if nc is the diffuse prior, there is nothing to do (and the
  // weight below would be 0/0 when both kappas are zero)
  if (nc.kappa == 0)
    return;

  double wold = ((double)kappa)/(kappa+nc.kappa);
  double wnew = 1-wold;

  // again, the outer product uses the old mean: diff = mu_1-mu_0
  Vector diff(nc.mu);
  diff.add(-1, mu);
  lambda.add(1.0, nc.lambda);
  lambda.addOuter(diff, wold*nc.kappa);

  mu.mult(wold);
  mu.add(wnew, nc.mu);

  kappa = kappa + nc.kappa;
  nu = nu + nc.kappa;
}
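@ A merging sketch (the matrices |ydata1| and |ydata2| are hypothetical,
holding two halves of a sample with the same number of rows):
|NormalConj nc1(ydata1);|
|NormalConj nc2(ydata2);|
|nc1.update(nc2);|
After the last call, |nc1| holds the posterior implied by the pooled
sample.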
@ This returns ${1\over \nu-d-1}\Lambda$, which is the mean of the
variance in the posterior distribution. If $\nu\leq d+1$, so that the
mean of the inverse Wishart distribution does not exist, NaNs are
returned.
@<|NormalConj::getVariance| code@>=
void NormalConj::getVariance(TwoDMatrix& v) const
{
  if (nu > getDim()+1) {
    v = (const TwoDMatrix&)lambda;
    v.mult(1.0/(nu-getDim()-1));
  } else
    v.nans();
}
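@ A usage sketch (the object |nc| is hypothetical): the caller supplies
the output matrix,
|TwoDMatrix v(nc.getDim(), nc.getDim());|
|nc.getVariance(v);|
and must be prepared for NaNs unless at least $d+2$ degrees of freedom
have been accumulated.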
@ End of {\tt normal\_conjugate.cpp} file.