#!/usr/bin/env python
import openturns as ot
from openturns.experimental import GaussianProcessRegression, GaussianProcessFitter
import openturns.testing as ott
# Test 1
def test_one_input_one_output():
    """GP regression of x*sin(x) in 1D: check interpolation, residuals and prediction."""
    sampleSize = 6
    dimension = 1
    f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
    # Training design: 6 points, mostly regular but with the first two moved
    X = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    Y = f(X)
    # create covariance model and trend basis
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential()
    # create fitter algorithm
    fit_algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
    # set sensible optimization bounds and estimate hyper parameters
    fit_algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
    fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
    fit_algo.run()
    # build the regression on top of the fitted process
    fit_result = fit_algo.getResult()
    algo = GaussianProcessRegression(fit_result)
    algo.run()
    result = algo.getResult()
    # the metamodel must interpolate the training data
    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])
    # Prediction accuracy at a point outside the design
    ott.assert_almost_equal(result.getMetaModel()([7.5]), f([7.5]), 0.3, 0.0)
# Test 2
def test_two_inputs_one_output():
    """GP regression of cos(0.5*x)+sin(y) on a 2D box design: interpolation + validation."""
    inputDimension = 2
    # Learning data: factorial Box design scaled to [0, 10]^2
    levels = [8, 5]
    box = ot.Box(levels)
    inputSample = box.generate()
    # Scale each direction
    inputSample *= 10.0
    model = ot.SymbolicFunction(["x", "y"], ["cos(0.5*x) + sin(y)"])
    outputSample = model(inputSample)
    # Validation sample drawn uniformly over the same domain
    sampleSize = 10
    inputValidSample = ot.JointDistribution(2 * [ot.Uniform(0, 10.0)]).getSample(
        sampleSize
    )
    outputValidSample = model(inputValidSample)
    # 2) Definition of exponential model
    # The parameters have been calibrated using TNC optimization
    # and AbsoluteExponential models
    scale = [5.33532, 2.61534]
    amplitude = [1.61536]
    covarianceModel = ot.SquaredExponential(scale, amplitude)
    # 3) Basis definition
    basis = ot.ConstantBasisFactory(inputDimension).build()
    # 4) GPF algorithm
    fit_algo = GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
    # set sensible optimization bounds and estimate hyper parameters
    fit_algo.setOptimizationBounds(
        ot.Interval(inputSample.getMin(), inputSample.getMax())
    )
    fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
    fit_algo.run()
    # perform an evaluation
    fit_result = fit_algo.getResult()
    # Regression algorithm
    algo = GaussianProcessRegression(fit_result)
    algo.run()
    result = algo.getResult()
    # Get meta model
    metaModel = result.getMetaModel()
    outData = metaModel(inputValidSample)
    # 5) Errors
    # Interpolation at the training points
    ott.assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5, 3.0e-5)
    # Prediction on the independent validation sample
    ott.assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1)
def test_two_outputs():
    """GP regression with a 2-output model using a tensorized covariance model."""
    f = ot.SymbolicFunction(["x"], ["x * sin(x)", "x * cos(x)"])
    sampleX = ot.Sample([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]])
    sampleY = f(sampleX)
    # Build a basis phi from R --> R^2
    # phi_{0,0} = phi_{0,1} = x
    # phi_{1,0} = phi_{1,1} = x^2
    phi0 = ot.AggregatedFunction(
        [ot.SymbolicFunction(["x"], ["x"]), ot.SymbolicFunction(["x"], ["x"])]
    )
    phi1 = ot.AggregatedFunction(
        [ot.SymbolicFunction(["x"], ["x^2"]), ot.SymbolicFunction(["x"], ["x^2"])]
    )
    basis = ot.Basis([phi0, phi1])
    # Freeze the 1D kernel parameters, then tensorize it for the two outputs
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
    fit_algo = GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
    # set sensible optimization bounds and estimate hyper parameters
    fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
    fit_algo.run()
    # perform an evaluation
    fit_result = fit_algo.getResult()
    algo = GaussianProcessRegression(fit_result)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm([5.5]), [-3.88363, 3.90286])
def test_stationary_fun():
    """Regression with a user-defined stationary correlation function.

    Non-regression test for https://github.com/openturns/openturns/issues/1861
    """
    ot.RandomGenerator.SetSeed(0)
    # Damped-cosine correlation rho(tau)
    rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
    model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
    # Noisy linear data: y = x + N(0, 0.1)
    x = ot.Normal().getSample(20)
    x.setDescription(["J0"])
    y = x + ot.Normal(0, 0.1).getSample(20)
    y.setDescription(["G0"])
    fit_algo = GaussianProcessFitter(x, y, model, ot.LinearBasisFactory().build())
    # set sensible optimization bounds and estimate hyper parameters
    fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
    fit_algo.run()
    # perform an evaluation
    fit_result = fit_algo.getResult()
    algo = GaussianProcessRegression(fit_result)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    ott.assert_almost_equal(mm([5.5]), [5.58838])
if __name__ == "__main__":
    # Run every test case when invoked as a script.
    test_one_input_one_output()
    test_two_inputs_one_output()
    test_two_outputs()
    test_stationary_fun()