"""
Cross Entropy Importance Sampling
=================================
"""
# %%
#
# The objective is to estimate a failure probability using Cross Entropy Importance Sampling.
# Two versions are available, operating either in the standard space or in the physical space.
# See :class:`~openturns.StandardSpaceCrossEntropyImportanceSampling` and :class:`~openturns.PhysicalSpaceCrossEntropyImportanceSampling`.
# We consider the simple stress beam example: :ref:`axial stressed beam <use-case-stressed-beam>`.
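#
# As a reminder, an importance sampling scheme estimates the failure probability by
#
# .. math::
#
#    \widehat{P}_f = \frac{1}{N} \sum_{i=1}^{N} \mathbf{1}_{g(\vect{x}_i) \leq 0} \, \frac{f_{\vect{X}}(\vect{x}_i)}{h(\vect{x}_i)},
#
# where the points :math:`\vect{x}_i` are drawn from the auxiliary density :math:`h` and
# :math:`f_{\vect{X}}` is the density of the initial distribution.
# The Cross Entropy method iteratively updates :math:`h` within a parametric family by minimizing
# the Kullback-Leibler divergence to the theoretically optimal importance density, which is
# concentrated on the failure domain.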
# %%
# First, we import the required Python modules:
# %%
import openturns as ot
import openturns.viewer as otv
from openturns.usecases import stressed_beam
# %%
# Create the probabilistic model
# ------------------------------
# %%
axialBeam = stressed_beam.AxialStressedBeam()
distribution = axialBeam.distribution
print("Initial distribution:", distribution)
# %%
# We draw the limit state function :math:`g` to get a better understanding of the shape of the limit state surface.
# %%
g = axialBeam.model
graph = ot.Graph("Simple stress beam", "R", "F", True, "upper right")
drawfunction = g.draw([1.8e6, 600], [4e6, 950.0], [100] * 2)
graph.add(drawfunction)
view = otv.View(graph)
# %%
# Create the output random vector :math:`Y = g(\vect{X})` with :math:`\vect{X} = (R,F)`.
# %%
X = ot.RandomVector(distribution)
Y = ot.CompositeRandomVector(g, X)
# %%
# Create the event :math:`\{ Y = g(\vect{X}) \leq 0 \}`
# -----------------------------------------------------
# %%
threshold = 0.0
event = ot.ThresholdEvent(Y, ot.Less(), threshold)
# %%
# Evaluate the probability with the Standard Space Cross Entropy
# --------------------------------------------------------------
# %%
# We choose to set the intermediate quantile level to 0.35.
# %%
standardSpaceIS = ot.StandardSpaceCrossEntropyImportanceSampling(event, 0.35)
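# %%
# As a quick check, the quantile level stored in the algorithm can be retrieved with
# the `getQuantileLevel` accessor:
# %%
print("Quantile level =", standardSpaceIS.getQuantileLevel())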
# %%
# The sample size at each iteration can be changed by the following accessor:
# %%
standardSpaceIS.setMaximumOuterSampling(1000)
# %%
# Now we can run the algorithm and get the results.
# %%
standardSpaceIS.run()
standardSpaceISResult = standardSpaceIS.getResult()
proba = standardSpaceISResult.getProbabilityEstimate()
print("Proba Standard Space Cross Entropy IS = ", proba)
print(
"Current coefficient of variation = ",
standardSpaceISResult.getCoefficientOfVariation(),
)
# %%
# The length of the confidence interval of level :math:`95\%` is:
# %%
length95 = standardSpaceISResult.getConfidenceLength()
print("Confidence length (0.95) = ", standardSpaceISResult.getConfidenceLength())
# %%
# from which we can build the confidence interval:
# %%
print(
"Confidence interval (0.95) = [",
proba - length95 / 2,
", ",
proba + length95 / 2,
"]",
)
# %%
# We can analyze the simulation budget.
# %%
print(
"Numerical budget: ",
standardSpaceISResult.getBlockSize() * standardSpaceISResult.getOuterSampling(),
)
# %%
# We can also retrieve the optimal auxiliary distribution in the standard space.
# %%
print(
"Auxiliary distribution in Standard space: ",
standardSpaceISResult.getAuxiliaryDistribution(),
)
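# %%
# As a rough check (the exact values depend on the random seed), we can print the mean of this
# auxiliary distribution: it is expected to have moved away from the origin of the standard space,
# towards the failure domain.
# %%
print(
    "Mean of the auxiliary distribution in the standard space: ",
    standardSpaceISResult.getAuxiliaryDistribution().getMean(),
)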
# %%
# Draw initial samples and final samples
# --------------------------------------
#
# %%
# First we get the auxiliary samples in the standard space and map them back to the physical space.
# %%
auxiliaryInputSamples = standardSpaceISResult.getAuxiliaryInputSample()
auxiliaryInputSamplesPhysicalSpace = (
distribution.getInverseIsoProbabilisticTransformation()(auxiliaryInputSamples)
)
# %%
graph = ot.Graph("Cloud of samples and failure domain", "R", "F", True, "upper right")
# Generation of samples with initial distribution
initialSamples = ot.Cloud(
distribution.getSample(1000), "blue", "plus", "Initial samples"
)
auxiliarySamples = ot.Cloud(
auxiliaryInputSamplesPhysicalSpace, "orange", "fsquare", "Auxiliary samples"
)
# Plot failure domain
nx, ny = 50, 50
xx = ot.Box([nx], ot.Interval([1.80e6], [4e6])).generate()
yy = ot.Box([ny], ot.Interval([600.0], [950.0])).generate()
inputData = ot.Box([nx, ny], ot.Interval([1.80e6, 600.0], [4e6, 950.0])).generate()
outputData = g(inputData)
mycontour = ot.Contour(xx, yy, outputData)
mycontour.setLevels([0.0])
mycontour.setLabels(["0.0"])
mycontour.setLegend("Failure domain")
graph.add(initialSamples)
graph.add(auxiliarySamples)
graph.add(mycontour)
view = otv.View(graph)
# %%
# In the previous graph, the blue crosses represent the samples drawn from the initial distribution and the orange squares represent the samples drawn from the auxiliary distribution at the final iteration.
# %%
# Estimation of the probability with the Physical Space Cross Entropy
# -------------------------------------------------------------------
# %%
# For more advanced usage, it is possible to work directly in the physical space, where the auxiliary distribution is specified by the user.
# In this second example, we choose the auxiliary distribution in the same family as the initial distribution and optimize all of its parameters.
# %%
# The parameters to be optimized are the native parameters of the auxiliary distribution.
# Among all the distribution parameters, the ones to be optimized are selected by their indices.
# In this case, all the parameters are optimized.
# %%
# Be careful: the native parameters of the auxiliary distribution are used.
# Here, for the distribution built with :class:`~openturns.LogNormalMuSigma`, they are
# `muLog`, `sigmaLog` and `gamma`.
# %%
# The `getParameterDescription()` method gives access to the names of the parameters of the auxiliary distribution.
# %%
ot.RandomGenerator.SetSeed(0)
marginR = ot.LogNormalMuSigma().getDistribution()
marginF = ot.Normal()
auxiliaryMarginals = [marginR, marginF]
auxiliaryDistribution = ot.JointDistribution(auxiliaryMarginals)
# Definition of parameters to be optimized
activeParameters = ot.Indices(5)
activeParameters.fill()
# WARNING: the bounds refer to the native parameters of the distribution
bounds = ot.Interval([14, 0.01, 0.0, 500, 20], [16, 0.2, 0.1, 1000, 70])
initialParameters = distribution.getParameter()
desc = auxiliaryDistribution.getParameterDescription()
p = auxiliaryDistribution.getParameter()
print(
"Parameters of the auxiliary distribution:",
", ".join([f"{param}: {value:.3f}" for param, value in zip(desc, p)]),
)
physicalSpaceIS1 = ot.PhysicalSpaceCrossEntropyImportanceSampling(
event, auxiliaryDistribution, activeParameters, initialParameters, bounds
)
# %%
# A custom optimization algorithm can also be specified for the optimization of the auxiliary distribution parameters; here, for example, we choose :class:`~openturns.TNC`.
# %%
physicalSpaceIS1.setOptimizationAlgorithm(ot.TNC())
# %%
# The number of samples per step can also be specified.
# %%
physicalSpaceIS1.setMaximumOuterSampling(1000)
# %%
# Finally, we run the algorithm and get the results.
# %%
physicalSpaceIS1.run()
physicalSpaceISResult1 = physicalSpaceIS1.getResult()
print("Probability of failure:", physicalSpaceISResult1.getProbabilityEstimate())
print("Coefficient of variation:", physicalSpaceISResult1.getCoefficientOfVariation())
# %%
# We can also decide to optimize only the means of the marginals and keep the other parameters identical to those of the initial distribution.
# %%
ot.RandomGenerator.SetSeed(0)
marginR = ot.LogNormalMuSigma(3e6, 3e5, 0.0).getDistribution()
marginF = ot.Normal(750.0, 50.0)
auxiliaryMarginals = [marginR, marginF]
auxiliaryDistribution = ot.JointDistribution(auxiliaryMarginals)
print("Parameters of initial distribution", auxiliaryDistribution.getParameter())
# Definition of parameters to be optimized
activeParameters = ot.Indices([0, 3])
# WARNING: the bounds refer to the native parameters of the distribution
bounds = ot.Interval([14, 500], [16, 1000])
initialParameters = [15, 750]
physicalSpaceIS2 = ot.PhysicalSpaceCrossEntropyImportanceSampling(
event, auxiliaryDistribution, activeParameters, initialParameters, bounds
)
physicalSpaceIS2.run()
physicalSpaceISResult2 = physicalSpaceIS2.getResult()
print("Probability of failure:", physicalSpaceISResult2.getProbabilityEstimate())
print("Coefficient of variation:", physicalSpaceISResult2.getCoefficientOfVariation())
# %%
# Let us analyze the auxiliary input samples of the two previous simulations.
# We plot the initial samples (in blue) and the auxiliary samples in the physical space
# (orange for the first simulation, black for the second one).
# %%
graph = ot.Graph("Cloud of samples and failure domain", "R", "F", True, "upper right")
auxiliarySamples1 = ot.Cloud(
physicalSpaceISResult1.getAuxiliaryInputSample(),
"orange",
"fsquare",
"Auxiliary samples, first case",
)
auxiliarySamples2 = ot.Cloud(
physicalSpaceISResult2.getAuxiliaryInputSample(),
"black",
"bullet",
"Auxiliary samples, second case",
)
graph.add(initialSamples)
graph.add(auxiliarySamples1)
graph.add(auxiliarySamples2)
graph.add(mycontour)
view = otv.View(graph)
# %%
# By analyzing the failure samples, one may want to include a correlation parameter in the auxiliary distribution.
# In this last example, we add a Normal copula. Its correlation parameter is optimized over the interval :math:`[0, 1]`.
# %%
ot.RandomGenerator.SetSeed(0)
marginR = ot.LogNormalMuSigma(3e6, 3e5, 0.0).getDistribution()
marginF = ot.Normal(750.0, 50.0)
auxiliaryMarginals = [marginR, marginF]
copula = ot.NormalCopula()
auxiliaryDistribution = ot.JointDistribution(auxiliaryMarginals, copula)
desc = auxiliaryDistribution.getParameterDescription()
p = auxiliaryDistribution.getParameter()
print(
"Initial parameters of the auxiliary distribution:",
", ".join([f"{param}: {value:.3f}" for param, value in zip(desc, p)]),
)
# Definition of parameters to be optimized
activeParameters = ot.Indices(6)
activeParameters.fill()
bounds = ot.Interval(
[14, 0.01, 0.0, 500.0, 20.0, 0.0], [16, 0.2, 0.1, 1000.0, 70.0, 1.0]
)
initialParameters = auxiliaryDistribution.getParameter()
physicalSpaceIS3 = ot.PhysicalSpaceCrossEntropyImportanceSampling(
event, auxiliaryDistribution, activeParameters, initialParameters, bounds
)
physicalSpaceIS3.run()
physicalSpaceISResult3 = physicalSpaceIS3.getResult()
desc = physicalSpaceISResult3.getAuxiliaryDistribution().getParameterDescription()
p = physicalSpaceISResult3.getAuxiliaryDistribution().getParameter()
print(
"Optimized parameters of the auxiliary distribution:",
", ".join([f"{param}: {value:.3f}" for param, value in zip(desc, p)]),
)
print("Probability of failure: ", physicalSpaceISResult3.getProbabilityEstimate())
print("Coefficient of variation: ", physicalSpaceISResult3.getCoefficientOfVariation())
# %%
# Finally, we plot the new auxiliary samples in black.
# %%
graph = ot.Graph("Cloud of samples and failure domain", "R", "F", True, "upper right")
auxiliarySamples1 = ot.Cloud(
physicalSpaceISResult1.getAuxiliaryInputSample(),
"orange",
"fsquare",
"Auxiliary samples, first case",
)
auxiliarySamples3 = ot.Cloud(
physicalSpaceISResult3.getAuxiliaryInputSample(),
"black",
"bullet",
"Auxiliary samples, second case",
)
graph.add(initialSamples)
graph.add(auxiliarySamples1)
graph.add(auxiliarySamples3)
graph.add(mycontour)
# sphinx_gallery_thumbnail_number = 4
view = otv.View(graph)
# %%
# The `quantileLevel` parameter can also be changed using the :class:`~openturns.ResourceMap` key `CrossEntropyImportanceSampling-DefaultQuantileLevel`.
# Be aware that this key changes the default value for both :class:`~openturns.StandardSpaceCrossEntropyImportanceSampling`
# and :class:`~openturns.PhysicalSpaceCrossEntropyImportanceSampling`.
# %%
ot.ResourceMap.SetAsScalar("CrossEntropyImportanceSampling-DefaultQuantileLevel", 0.4)
physicalSpaceIS4 = ot.PhysicalSpaceCrossEntropyImportanceSampling(
event, auxiliaryDistribution, activeParameters, initialParameters, bounds
)
print("Modified quantile level:", physicalSpaceIS4.getQuantileLevel())
# %%
# The optimized auxiliary distribution, which includes a dependency between the two marginals, fits the failure domain better, resulting in a lower coefficient of variation.
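# This can be checked by comparing the coefficients of variation obtained with the three
# physical space simulations (an illustrative comparison, the values depend on the random seed):
# %%
print("CV, first case: ", physicalSpaceISResult1.getCoefficientOfVariation())
print("CV, second case:", physicalSpaceISResult2.getCoefficientOfVariation())
print("CV, third case: ", physicalSpaceISResult3.getCoefficientOfVariation())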
# %%
otv.View.ShowAll()