#! /usr/bin/env python
import openturns as ot
from openturns.testing import assert_almost_equal
f = ot.SymbolicFunction("x", "x^2")
f = ot.MemoizeFunction(f)
f.disableHistory()
print(f)
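# A minimal illustration, not part of the original checks (assumption: the
# cache of a MemoizeFunction is enabled by default, which the cache-hit
# checks further below rely on): a repeated input is served from the cache.
g = ot.MemoizeFunction(ot.SymbolicFunction("x", "x^3"))
g([2.0])
g([2.0])  # same point again: a cache hit, no new evaluation
print("g cache hits=", g.getCacheHits())  # expected: 1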
size = 4
inputSample = ot.Sample(size, 1)
for i in range(size):
    inputSample[i, 0] = i
outputSample = f(inputSample)
print("Is history enabled for f? ", f.isHistoryEnabled())
print("input history=", f.getInputHistory())
print("output history=", f.getOutputHistory())
f.enableHistory()
outputSample = f(inputSample)
print("Is history enabled for f? ", f.isHistoryEnabled())
print("input history=", f.getInputHistory())
print("output history=", f.getOutputHistory())
f.clearHistory()
print("Is history enabled for f? ", f.isHistoryEnabled())
print("input history=", f.getInputHistory())
print("output history=", f.getOutputHistory())
# Perform the computation twice
outputSample = f(inputSample)
outputSample = f(inputSample)
print("input history=", f.getInputHistory())
print("output history=", f.getOutputHistory())
mem = ot.MemoizeFunction(ot.SymbolicFunction(["x1", "x2"], ["x1+x2", "3*(x1+x2)"]))
mem([1, 2])
mem2 = ot.MemoizeFunction(mem.getMarginal(1))
mem2([1, 2])
print("mem2.in", mem2.getCacheInput())
print("mem2.out", mem2.getCacheOutput())
print("mem2.hits", mem2.getCacheHits())
# check getCacheInput/getCacheOutput order
for i in range(5):
    mem([i + 2, i + 3])
assert mem(mem.getCacheInput()) == mem.getCacheOutput(), "wrong order"
print("ok")
ot.PlatformInfo.SetNumericalPrecision(20)
def py_f(X):
    return X
# Check that the memoization propagates through the finite difference gradients.
# Here we use a PythonFunction as its gradient/hessian are based on finite
# differences by default
ot_f = ot.MemoizeFunction(ot.PythonFunction(3, 3, py_f))
x = [1.0, 2.0, 3.0]
n_calls_0 = ot_f.getEvaluationCallsNumber()
res_f = ot_f(x)
res_grad = ot_f.gradient(x)
res_hess = ot_f.hessian(x)
n_calls_1 = ot_f.getEvaluationCallsNumber()
# 25 = 1 (value) + 6 (centered FD gradient: 2 points per input dimension) + 18 (FD Hessian)
assert_almost_equal(n_calls_1 - n_calls_0, 25, 0.0, 0.0)
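# The gradient part of the count can be reproduced in isolation; a minimal
# sketch, assuming the default PythonFunction gradient is a centered finite
# difference (which the 1 + 6 breakdown above relies on):
check_f = ot.MemoizeFunction(ot.PythonFunction(3, 3, py_f))
check_f.gradient(x)
print("gradient-only calls=", check_f.getEvaluationCallsNumber())  # expected: 6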
# Do the computation once again
n_calls_0 = n_calls_1
res_f = ot_f(x)
res_grad = ot_f.gradient(x)
res_hess = ot_f.hessian(x)
n_calls_1 = ot_f.getEvaluationCallsNumber()
# 0 = everything is reused from the cache
assert_almost_equal(n_calls_1 - n_calls_0, 0, 0.0, 0.0)
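# All 25 points evaluated in the first pass are found in the cache, so the
# value, gradient and Hessian above trigger no new evaluations.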
# Now switch to non-centered gradients to reduce the number of calls to the minimum
eps = 1e-8
gr_f = ot.NonCenteredFiniteDifferenceGradient(eps, ot_f.getEvaluation())
ot_f.setGradient(gr_f)
x = [3, 1, 2]
n_calls_0 = n_calls_1
res_f = ot_f(x)
res_grad = ot_f.gradient(x)
res_hess = ot_f.hessian(x)
n_calls_1 = ot_f.getEvaluationCallsNumber()
# 22 = 1 (value) + 3 (non-centered FD gradient: 3 shifted points, the center point is reused from the cache) + 18 (FD Hessian)
assert_almost_equal(n_calls_1 - n_calls_0, 22, 0.0, 0.0)
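# Non-centered differences use x + h*e_i only, so the gradient needs just the
# 3 shifted points: the center value f(x) is already in the cache.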
# check that marginals share the same cache
def f_py(x):
    f_py.n += 1
    x0, x1 = x
    return [x0 + x1, x0 - x1]
f_py.n = 0
f_test = ot.MemoizeFunction(ot.PythonFunction(2, 2, f_py))
f1 = f_test.getMarginal(0)
f2 = f_test.getMarginal(1)
x = [1, 2]
f1(x)
f2(x)
assert f_py.n == 1, "only one eval"
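# f1 and f2 are marginals of the same memoized function: the first call fills
# the shared cache and the second call is a pure cache hit, so f_py runs once.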
# Here we want f modified by reference through evaluations of f0
f = ot.MemoizeFunction(ot.SymbolicFunction(["x1", "x2"], ["x1+x2", "x1*x2"]))
x = ot.Normal(2).getSample(10)
f0 = ot.MemoizeFunction(f.getMarginal([0]))
f0(x)
assert f.getInputHistory().getSize() == len(x), "wrong size"
assert f.getCacheInput().getSize() == len(x), "wrong size"
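# The marginal f0 shares the cache and history of its parent f by reference,
# so evaluating f0 fills both, as asserted above.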