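# runtests.jl: unit tests for CalibrateEmulateSample.MarkovChainMonteCarlo.
# Builds Gaussian-process emulators of a noisy sine curve, then samples the
# emulated posterior with random-walk Metropolis (RWMHSampling) and
# preconditioned Crank-Nicolson (pCNMHSampling) proposals.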
using Random
using LinearAlgebra
using Distributions
using GaussianProcesses
using Test
using CalibrateEmulateSample.MarkovChainMonteCarlo
using CalibrateEmulateSample.ParameterDistributions
using CalibrateEmulateSample.Emulators
using CalibrateEmulateSample.DataContainers
function test_data(; rng_seed = 41, n = 40, var_y = 0.05, rest...)
    # Seed for pseudo-random number generator
    rng = Random.MersenneTwister(rng_seed)
    # We need a GaussianProcess to run MarkovChainMonteCarlo, so reconstruct the one
    # that's tested in test/GaussianProcesses/runtests.jl Case 1
    x = 2π * rand(rng, Float64, (1, n)) # predictors/features: 1 × n
    σ2_y = reshape([var_y], 1, 1)       # observational noise variance, as a 1 × 1 covariance
    # Normal is parameterized by the standard deviation, so take the sqrt of the variance
    y = sin.(x) + rand(rng, Normal(0, sqrt(σ2_y[1])), (1, n)) # predictands/targets: 1 × n
    return y, σ2_y, PairedDataContainer(x, y, data_are_columns = true), rng
end
function test_prior()
    ### Define prior
    umin = -1.0
    umax = 6.0
    # Standard Normal in the unconstrained space, mapped by the `bounded` constraint into (umin, umax)
    prior_dist = Parameterized(Normal(0, 1))
    prior_constraint = bounded(umin, umax)
    prior_name = "u"
    return ParameterDistribution(prior_dist, prior_constraint, prior_name)
end
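# GP emulator in the original output space: no output standardization, full SVD retained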
function test_gp_1(y, σ2_y, iopairs::PairedDataContainer; norm_factor = nothing)
    gppackage = GPJL()
    pred_type = YType()
    # Construct kernel:
    # Squared exponential kernel (note that hyperparameters are on log scale)
    # with observational noise
    GPkernel = SE(log(1.0), log(1.0))
    gp = GaussianProcess(gppackage; kernel = GPkernel, noise_learn = true, prediction_type = pred_type)
    em = Emulator(
        gp,
        iopairs;
        obs_noise_cov = σ2_y,
        normalize_inputs = false,
        standardize_outputs = false,
        standardize_outputs_factors = norm_factor,
        retained_svd_frac = 1.0,
    )
    Emulators.optimize_hyperparameters!(em)
    return em
end
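# As test_gp_1, but with standardized outputs and a truncated SVD (retained_svd_frac = 0.9)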
function test_gp_2(y, σ2_y, iopairs::PairedDataContainer; norm_factor = nothing)
    gppackage = GPJL()
    pred_type = YType()
    # Construct kernel:
    # Squared exponential kernel (note that hyperparameters are on log scale)
    # with observational noise
    GPkernel = SE(log(1.0), log(1.0))
    gp = GaussianProcess(gppackage; kernel = GPkernel, noise_learn = true, prediction_type = pred_type)
    em = Emulator(
        gp,
        iopairs;
        obs_noise_cov = σ2_y,
        normalize_inputs = false,
        standardize_outputs = true,
        standardize_outputs_factors = norm_factor,
        retained_svd_frac = 0.9,
    )
    Emulators.optimize_hyperparameters!(em)
    return em
end
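# Shared driver: tune the proposal step size on a short chain, then draw a long chain
# and return the tuned step size together with the posterior mean of u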
function mcmc_test_template(
    prior::ParameterDistribution,
    σ2_y,
    em::Emulator;
    mcmc_alg = RWMHSampling(),
    obs_sample = 1.0,
    init_params = 3.0,
    step = 0.25,
    rng = Random.GLOBAL_RNG,
)
    obs_sample = reshape(collect(obs_sample), 1)   # scalar or length-1 Vector -> Vector
    init_params = reshape(collect(init_params), 1) # scalar or length-1 Vector -> Vector
    mcmc = MCMCWrapper(mcmc_alg, obs_sample, prior, em; init_params = init_params)
    # First let's run a short chain to determine a good step size
    new_step = optimize_stepsize(mcmc; init_stepsize = step, N = 5000)
    # Now begin the actual MCMC
    chain = sample(rng, mcmc, 100_000; stepsize = new_step, discard_initial = 1000)
    posterior_distribution = get_posterior(mcmc, chain)
    posterior_mean = mean(posterior_distribution)
    return new_step, posterior_mean[1]
end
@testset "MarkovChainMonteCarlo" begin
    obs_sample = [1.0]
    prior = test_prior()
    y, σ2_y, iopairs, rng = test_data(; rng_seed = 42, n = 40, var_y = 0.05)
    mcmc_params =
        Dict(:mcmc_alg => RWMHSampling(), :obs_sample => obs_sample, :init_params => [3.0], :step => 0.5, :rng => rng)

    @testset "Constructor: standardize" begin
        em = test_gp_1(y, σ2_y, iopairs)
        test_obs = MarkovChainMonteCarlo.to_decorrelated(obs_sample, em)
        # The MCMC stores an SVD-transformed (decorrelated) sample:
        # 1.0 / sqrt(0.05) * obs_sample ≈ 4.472
        @test isapprox(test_obs, (obs_sample ./ sqrt(σ2_y[1, 1])); atol = 1e-2)
    end

    @testset "Sine GP & RW Metropolis" begin
        em_1 = test_gp_1(y, σ2_y, iopairs)
        new_step, posterior_mean_1 = mcmc_test_template(prior, σ2_y, em_1; mcmc_params...)
        @test isapprox(new_step, 0.5; atol = 0.1)
        # The difference between mean_1 and the ground truth comes from MCMC convergence and GP sampling
        @test isapprox(posterior_mean_1, π / 2; atol = 4e-1)
        # Now test SVD normalization
        norm_factor = fill(10.0, size(y[:, 1])) # must match the output dimension
        em_2 = test_gp_2(y, σ2_y, iopairs; norm_factor = norm_factor)
        _, posterior_mean_2 = mcmc_test_template(prior, σ2_y, em_2; mcmc_params...)
        # Any difference between mean_1 and mean_2 comes only from MCMC convergence
        @test isapprox(posterior_mean_2, posterior_mean_1; atol = 0.1)
    end

    @testset "Sine GP & pCN" begin
        mcmc_params = Dict(
            :mcmc_alg => pCNMHSampling(),
            :obs_sample => obs_sample,
            :init_params => [3.0],
            :step => 0.5,
            :rng => rng,
        )
        em_1 = test_gp_1(y, σ2_y, iopairs)
        new_step, posterior_mean_1 = mcmc_test_template(prior, σ2_y, em_1; mcmc_params...)
        @test isapprox(new_step, 0.25; atol = 0.1)
        # The difference between mean_1 and the ground truth comes from MCMC convergence and GP sampling
        @test isapprox(posterior_mean_1, π / 2; atol = 4e-1)
        # Now test SVD normalization
        norm_factor = fill(10.0, size(y[:, 1])) # must match the output dimension
        em_2 = test_gp_2(y, σ2_y, iopairs; norm_factor = norm_factor)
        _, posterior_mean_2 = mcmc_test_template(prior, σ2_y, em_2; mcmc_params...)
        # Any difference between mean_1 and mean_2 comes only from MCMC convergence
        @test isapprox(posterior_mean_2, posterior_mean_1; atol = 0.1)
    end
end