-
-
Notifications
You must be signed in to change notification settings - Fork 74
/
test_umath.py
227 lines (177 loc) · 7.36 KB
/
test_umath.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
"""
Tests of the code in uncertainties.umath.
These tests can be run through the Nose testing framework.
(c) 2010 by Eric O. LEBIGOT (EOL).
"""
from __future__ import division
# Standard modules
import sys
import math
# Local modules:
import uncertainties
import uncertainties.umath as umath
import test_uncertainties
from uncertainties import __author__
###############################################################################
# Unit tests
def test_fixed_derivatives_math_funcs():
    """
    Comparison between function derivatives and numerical derivatives.

    This comparison is useful for derivatives that are analytical.
    """

    # Each scalar->scalar function exported by umath is compared to a
    # numerical differentiation of itself (the nominal value of the
    # result is used as the underlying function):
    for func_name in umath.many_scalars_to_scalar_funcs:
        # print "Checking %s..." % func_name
        wrapped_func = getattr(umath, func_name)
        approx_derivatives = uncertainties.NumericalDerivatives(
            lambda *args: wrapped_func(*args))
        test_uncertainties._compare_derivatives(wrapped_func,
                                                approx_derivatives)

    # Functions that are not in umath.many_scalars_to_scalar_funcs:

    ##
    # modf() and frexp() both return a tuple, so each component is
    # checked separately through a small extractor function:

    def modf_fraction(value):
        return umath.modf(value)[0]

    def modf_integer(value):
        return umath.modf(value)[1]

    def frexp_mantissa(value):
        return umath.frexp(value)[0]

    def frexp_exponent(value):
        return umath.frexp(value)[1]

    for component_func in (modf_fraction, modf_integer,
                           frexp_mantissa, frexp_exponent):
        test_uncertainties._compare_derivatives(
            component_func,
            uncertainties.NumericalDerivatives(component_func))
def test_compound_expression():
    """
    Test equality between different formulas.
    """

    value = uncertainties.ufloat((3, 0.1))

    # tan(x) must equal sin(x)/cos(x), both in nominal value and in
    # derivatives.  Prone to numerical errors (but not much more than
    # regular floats):
    ratio = umath.sin(value) / umath.cos(value)
    assert umath.tan(value) == ratio
def test_numerical_example():
    "Test specific numerical examples"

    angle = uncertainties.ufloat((3.14, 0.01))
    sine = umath.sin(angle)

    # In order to prevent big errors such as a wrong, constant value
    # for all analytical and numerical derivatives, which would make
    # test_fixed_derivatives_math_funcs() succeed despite incorrect
    # calculations:
    formatted = "%.6f +/- %.6f" % (sine.nominal_value, sine.std_dev())
    assert formatted == "0.001593 +/- 0.010000"

    # Regular calculations should still work:
    assert "%.11f" % umath.sin(3) == "0.14112000806"
def test_monte_carlo_comparison():
    """
    Full comparison to a Monte-Carlo calculation.

    Both the nominal values and the covariances are compared between
    the direct calculation performed in this module and a Monte-Carlo
    simulation.
    """

    # NumPy is optional for this test: skip (with a warning) when it
    # is not installed:
    try:
        import numpy
        import numpy.random
    except ImportError:
        import warnings
        warnings.warn("Test not performed because NumPy is not available")
        return

    # Works on numpy.arrays of Variable objects (whereas umath.sin()
    # does not):
    sin_uarrayncert = numpy.vectorize(umath.sin, otypes=[object])

    # Example expression (with correlations, and multiple variables combined
    # in a non-linear way):
    def function(x, y):
        """
        Function that takes two NumPy arrays of the same size.
        """
        # The uncertainty due to x is about equal to the uncertainty
        # due to y:
        return 10 * x**2 - x * sin_uarrayncert(y**3)

    x = uncertainties.ufloat((0.2, 0.01))
    y = uncertainties.ufloat((10, 0.001))
    function_result_this_module = function(x, y)
    nominal_value_this_module = function_result_this_module.nominal_value

    # Covariances "f*f", "f*x", "f*y":
    covariances_this_module = numpy.array(uncertainties.covariance_matrix(
        (x, y, function_result_this_module)))

    def monte_carlo_calc(n_samples):
        """
        Calculate function(x, y) on n_samples samples and returns the
        median, and the covariances between (x, y, function(x, y)).
        """
        # Result of a Monte-Carlo simulation: normal samples drawn
        # around each variable's nominal value, with its standard
        # deviation:
        x_samples = numpy.random.normal(x.nominal_value, x.std_dev(),
                                        n_samples)
        y_samples = numpy.random.normal(y.nominal_value, y.std_dev(),
                                        n_samples)
        function_samples = function(x_samples, y_samples)

        # 3x3 covariance matrix over (x, y, f), in the same variable
        # order as covariance_matrix() above:
        cov_mat = numpy.cov([x_samples, y_samples], function_samples)

        return (numpy.median(function_samples), cov_mat)

    (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000)

    ## Comparison between both results:

    # The covariance matrices must be close:

    # We rely on the fact that covariances_samples very rarely has
    # null elements:

    # Element-wise relative comparison (5 % tolerance) of the two
    # covariance matrices:
    assert numpy.vectorize(test_uncertainties._numbers_close)(
        covariances_this_module,
        covariances_samples,
        0.05).all(), (
        "The covariance matrices do not coincide between"
        " the Monte-Carlo simulation and the direct calculation:\n"
        "* Monte-Carlo:\n%s\n* Direct calculation:\n%s"
        % (covariances_samples, covariances_this_module)
        )

    # The nominal values must be close:
    assert test_uncertainties._numbers_close(
        nominal_value_this_module,
        nominal_value_samples,
        # The scale of the comparison depends on the standard
        # deviation: the nominal values can differ by a fraction of
        # the standard deviation:
        math.sqrt(covariances_samples[2, 2])
        / abs(nominal_value_samples) * 0.5), (
        "The nominal value (%f) does not coincide with that of"
        " the Monte-Carlo simulation (%f), for a standard deviation of %f."
        % (nominal_value_this_module,
           nominal_value_samples,
           math.sqrt(covariances_samples[2, 2]))
        )
def test_math_module():
    "Operations with the math module"

    num = uncertainties.ufloat((-1.5, 0.1))

    # The exponent must not be differentiated, when calculating the
    # following (the partial derivative with respect to the exponent
    # is not defined):
    assert (num**2).nominal_value == 2.25

    # Regular operations are chosen to be unchanged:
    assert isinstance(umath.sin(3), float)

    if sys.version_info < (2, 6):
        # The remaining checks only apply to Python >=2.6 functions:
        return

    # factorial() must not be "damaged" by the umath module, so as
    # to help make it a drop-in replacement for math (even though
    # factorial() does not work on numbers with uncertainties
    # because it is restricted to integers, as for
    # math.factorial()):
    assert umath.factorial(4) == 24

    # Boolean functions:
    assert not umath.isinf(num)

    # Comparison, possibly between an AffineScalarFunc object and a
    # boolean, which makes things more difficult for this code:
    assert umath.isinf(num) == False

    # fsum is special because it does not take a fixed number of
    # variables:
    assert umath.fsum([num, num]).nominal_value == -3