"""
Create a polynomial chaos metamodel by integration on the cantilever beam
=========================================================================
"""
# %%
# In this example, we create a polynomial chaos metamodel by integration on the
# :ref:`cantilever beam <use-case-cantilever-beam>` example.
# We choose to evaluate the coefficients of the chaos decomposition by
# integration using various kinds of design of experiment:
#
# - Gauss product
# - Latin hypercube sampling
# - Quasi Monte Carlo with a Sobol' sequence
#
# We will compare the results obtained on each design.
# %%
from openturns.usecases import cantilever_beam
import openturns as ot
import openturns.viewer as otv
ot.Log.Show(ot.Log.NONE)
# %%
# We first load the model from the usecases module:
cb = cantilever_beam.CantileverBeam()
# %%
# In this example we consider all marginals independent.
# They are defined in the :class:`~openturns.usecases.cantilever_beam.CantileverBeam` class:
dist_E = cb.E  # marginal of the Young's modulus E
dist_F = cb.F  # marginal of the load F
dist_L = cb.L  # marginal of the length L
dist_I = cb.II  # marginal of the moment of inertia; the attribute is named `II` in the use case class
distribution = cb.independentDistribution  # joint distribution with independent marginals
# %%
# We load the model.
dim_input = cb.dim  # dimension of the input
dim_output = 1  # dimension of the output
g = cb.model  # the physical model mapping (E, F, L, I) to the beam deviation
# %%
# Create a polynomial chaos decomposition
# ---------------------------------------
# %%
# We create the multivariate polynomial basis by tensorization of the
# univariate polynomials and the default linear enumerate rule.
# Each univariate polynomial family is selected automatically from the
# corresponding marginal distribution.
multivariateBasis = ot.OrthogonalProductPolynomialFactory(
    [dist_E, dist_F, dist_L, dist_I]
)
# %%
# In this case, we select `P` using the
# :meth:`~openturns.EnumerateFunction.getBasisSizeFromTotalDegree` method,
# so that all polynomials with total degree lower or equal to 5 are used.
# This will lead to the computation of 126 coefficients.
totalDegree = 5
enum_func = multivariateBasis.getEnumerateFunction()
# Number of multivariate polynomials of total degree <= 5 in dimension 4:
# binom(5 + 4, 4) = 126.
P = enum_func.getBasisSizeFromTotalDegree(totalDegree)
print(f"P={P}")
# %%
# We select the :class:`~openturns.FixedStrategy` truncation rule, which corresponds to using
# the first `P` polynomials of the polynomial basis.
adaptiveStrategy = ot.FixedStrategy(multivariateBasis, P)
# %%
# We begin by getting the standard measure associated with the multivariate polynomial basis.
# We see that the range of the `Beta` distribution has been standardized into the [-1,1] interval.
# This is the same for the `Uniform` distribution and the second `Beta` distribution.
measure = multivariateBasis.getMeasure()
print(f"measure={measure}")
# %%
# The choice of the :class:`~openturns.GaussProductExperiment` rule with 4 nodes
# in each of the 4 dimensions leads to :math:`4^4=256` evaluations of the model.
marginalSizes = [4] * dim_input
experiment = ot.GaussProductExperiment(distribution, marginalSizes)
print(f"N={experiment.getSize()}")
# Generate the quadrature nodes X together with the integration weights W.
X, W = experiment.generateWithWeights()
Y = g(X)  # evaluate the model once on each quadrature node
# %%
# We now set the method used to compute the coefficients; we select the integration method.
projectionStrategy = ot.IntegrationStrategy()
# %%
# We can now create the functional chaos.
# The (X, W, Y) constructor makes the integration strategy reuse the
# quadrature nodes and weights computed above.
algo = ot.FunctionalChaosAlgorithm(
    X, W, Y, distribution, adaptiveStrategy, projectionStrategy
)
algo.run()
# %%
# Get the result
result = algo.getResult()
# %%
# The :meth:`~openturns.FunctionalChaosResult.getMetaModel` method returns the metamodel function.
metamodel = result.getMetaModel()
# %%
# Validate the metamodel
# ----------------------
# %%
# Generate a new validation sample (which is independent of the training sample).
n_valid = 1000
X_test = distribution.getSample(n_valid)
Y_test = g(X_test)
# %%
# The :class:`~openturns.MetaModelValidation` class validates the metamodel
# based on a validation sample.
val = ot.MetaModelValidation(X_test, Y_test, metamodel)
# %%
# Compute the :math:`Q^2` predictivity coefficient.
Q2 = val.computePredictivityFactor()[0]
# The bare expression displays the value when the script is rendered
# as a notebook by sphinx-gallery.
Q2
# %%
# Plot the observed versus the predicted outputs.
graph = val.drawValidation()
graph.setTitle(f"Gauss product N={experiment.getSize()} - Q2={Q2 * 100:.2f}")
view = otv.View(graph)
# %%
# Now repeat the same process on various designs.
def draw_validation(experiment):
    """
    Build and validate a chaos metamodel on the given design of experiments.

    The coefficients are computed by integration on the design held by
    `experiment`, and the metamodel is validated on a fresh test sample.

    Parameters
    ----------
    experiment : :class:`~openturns.WeightedExperiment`
        The design of experiments used to estimate the chaos coefficients.

    Returns
    -------
    graph : :class:`~openturns.Graph`
        The observed-versus-predicted validation plot, titled with the
        experiment class name, its size and the Q2 score (in percent).
    """
    projectionStrategy = ot.IntegrationStrategy(experiment)
    # Pass the model `g` (not the previously computed Gauss-product sample
    # X, Y): the model-based constructor lets the algorithm generate and
    # evaluate the design held by `experiment`, so that each call really
    # uses its own design.  Passing the fixed 256-point Gauss sample here
    # would make the LHS / low-discrepancy comparison meaningless.
    algo = ot.FunctionalChaosAlgorithm(
        g, distribution, adaptiveStrategy, projectionStrategy
    )
    algo.run()
    result = algo.getResult()
    metamodel = result.getMetaModel()
    # Validate on an independent sample of size n_valid (module-level).
    X_test = distribution.getSample(n_valid)
    Y_test = g(X_test)
    val = ot.MetaModelValidation(X_test, Y_test, metamodel)
    Q2 = val.computePredictivityFactor()[0]
    graph = val.drawValidation()
    graph.setTitle(
        f"{experiment.__class__.__name__} - N={experiment.getSize()} - Q2={Q2 * 100:.2f}"
    )
    return graph
# %%
# Use an LHS design.
experiment = ot.LHSExperiment(distribution, int(1e6))
graph = draw_validation(experiment)
view = otv.View(graph)
# %%
# Use a low-discrepancy experiment (Quasi-Monte Carlo).
sequence = ot.SobolSequence()
experiment = ot.LowDiscrepancyExperiment(sequence, distribution, int(1e5))
graph = draw_validation(experiment)
view = otv.View(graph)
# Display every figure created above.
otv.View.ShowAll()
# %%
# Conclusion
# ----------
# With the Gauss product rule the coefficients are particularly well computed,
# since the Q2 coefficient is excellent even with a relatively limited number
# of simulations (256 points).
# On the other hand, the LHS and low-discrepancy experiments require many more
# points to achieve a Q2 above 99%.