################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `vmp` module.
"""

import numpy as np
from scipy import special
from numpy import testing

from bayespy.nodes import (Gaussian,
                           GaussianARD,
                           GaussianGamma,
                           Gamma,
                           Wishart)

from ..vmp import VB

from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase


class TestVB(TestCase):

    def test_annealing(self):

        X = GaussianARD(3, 4)
        X.initialize_from_parameters(-1, 6)
        Q = VB(X)
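        # Anneal the objective. The point of this test is that
        # get_gradient() and compute_lowerbound() stay mutually consistent
        # under annealing, not just for the plain lower bound.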
        Q.set_annealing(0.1)

        #
        # Check that the gradient is correct
        #

        # Initial parameters
        phi0 = X.phi

        # Gradient
        rg = X.get_riemannian_gradient()
        g = X.get_gradient(rg)
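        # get_gradient() converts the Riemannian gradient into the gradient
        # of the lower bound with respect to the natural parameters, which is
        # what the finite differences below approximate.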

        # Numerical gradient of the first parameter
        eps = 1e-6
        p0 = X.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
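        # Forward-difference approximation of the gradient:
        #   g_num[i] ~= (L(p + eps * e_i) - L(p)) / eps
        # where e_i perturbs only the i'th parameter.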
        g_num = [None, None]
        p1 = p0[0] + eps
        X.set_parameters([p1, p0[1]])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[0] = (l1 - l0) / eps

        # Numerical gradient of the second parameter
        p1 = p0[1] + eps
        X.set_parameters([p0[0], p1])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[1] = (l1 - l0) / eps

        # Check
        self.assertAllClose(g[0],
                            g_num[0])
        self.assertAllClose(g[1],
                            g_num[1])

        #
        # Gradient should be zero after updating
        #
        X.update()

        # Initial parameters
        phi0 = X.phi

        # Numerical gradient of the first parameter
        eps = 1e-8
        p0 = X.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
        g_num = [None, None]
        p1 = p0[0] + eps
        X.set_parameters([p1, p0[1]])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[0] = (l1 - l0) / eps

        # Numerical gradient of the second parameter
        p1 = p0[1] + eps
        X.set_parameters([p0[0], p1])
        l1 = Q.compute_lowerbound(ignore_masked=False)
        g_num[1] = (l1 - l0) / eps
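        # After the VB update the node sits at a stationary point of the
        # (annealed) lower bound, so the finite-difference gradients should
        # vanish up to the O(eps) truncation error of the forward difference;
        # hence the loose absolute tolerance.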

        # Check
        self.assertAllClose(0,
                            g_num[0],
                            atol=1e-5)
        self.assertAllClose(0,
                            g_num[1],
                            atol=1e-5)

        #
        # Check the Riemannian gradient against a VB-EM update
        #

        # Not at the optimum
        X.initialize_from_parameters(-1, 6)

        # Initial parameters
        phi0 = X.phi

        # Gradient
        g = X.get_riemannian_gradient()

        # Parameters after VB-EM update
        X.update()
        phi1 = X.phi
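        # For a conjugate-exponential node a single VB-EM update jumps
        # straight to the optimal natural parameters, so the Riemannian
        # (natural) gradient should equal the parameter difference
        # phi1 - phi0, which is exactly what is asserted here.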

        # Check
        self.assertAllClose(g[0],
                            phi1[0] - phi0[0])
        self.assertAllClose(g[1],
                            phi1[1] - phi0[1])

        pass
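

# A minimal sketch of a direct-run hook, assuming that
# bayespy.utils.misc.TestCase subclasses unittest.TestCase. Note that the
# relative import above (``from ..vmp import VB``) requires package context,
# so run e.g. ``python -m pytest`` on this file or unittest discovery from
# the project root rather than executing the file as a plain script.
if __name__ == "__main__":
    import unittest
    unittest.main()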