-
Notifications
You must be signed in to change notification settings - Fork 17
/
loss_minimization_sparse_eki.jl
115 lines (96 loc) · 3.54 KB
/
loss_minimization_sparse_eki.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
# # Minimization of simple loss functions with sparse EKI
#
# First we load the required packages.
using Distributions, LinearAlgebra, Random, Plots
using EnsembleKalmanProcesses
using EnsembleKalmanProcesses.ParameterDistributions
const EKP = EnsembleKalmanProcesses
# ## Loss function with single minimum
#
# Here, we minimize the loss function
# ```math
# G₁(u) = \|u - u_*\| ,
# ```
# where ``u`` is a 2-vector of parameters and ``u_*`` is given; here ``u_* = (1, 0)``.
u★ = [1, 0]

# Euclidean distance from `u` to the optimum `u★`, wrapped in a 1-vector because the
# Kalman process expects the forward map to return a vector-valued output.
# Using `LinearAlgebra.norm` (already imported above) instead of a hand-rolled
# `sqrt` of squared differences; this also works for `u` of any length.
G₁(u) = [norm(u - u★)]
nothing # hide
# Seed the global pseudo-random number generator so the example is reproducible.
rng_seed = 41
rng = Random.seed!(Random.GLOBAL_RNG, rng_seed)
nothing # hide
# A small "stabilization" covariance (observational-noise level) helps the
# algorithm converge.
dim_output = 1
stabilization_level = 1e-3
Γ_stabilization = Matrix(stabilization_level * I, dim_output, dim_output)
# The loss is non-negative, so its minimum is attained at the target value 0,
G_target = zeros(Int, dim_output)
nothing # hide
# ### Prior distributions
#
# As we work with a Bayesian method, we define a prior. This will behave like an "initial guess"
# for the likely region of parameter space we expect the solution to live in. Here we define
# ``Normal(0,2^2)`` distributions with no constraints
prior_u1 = constrained_gaussian("u1", 0, 2, -Inf, Inf)
# Fix: the second prior must be named "u2"; it was mistakenly given the
# duplicate name "u1", which mislabels the second parameter dimension.
prior_u2 = constrained_gaussian("u2", 0, 2, -Inf, Inf)
# Stack the two one-dimensional priors into a single 2-dimensional prior.
prior = combine_distributions([prior_u1, prior_u2])
nothing # hide
# !!! note
#     In this example there are no constraints, therefore no parameter transformations.
# ### Calibration
#
# Fix the ensemble size and the number of update steps of the algorithm.
N_ensemble = 20
N_iterations = 10
nothing # hide
# Draw the initial ensemble of particles by sampling the prior.
initial_ensemble = EKP.construct_initial_ensemble(rng, prior, N_ensemble)
# Parameters controlling the sparsity-inducing EKI update
# (see the `SparseInversion` docs for their precise roles):
uc_idx = [1, 2]        # indices of the parameters subject to the sparse update
γ = 1.0                # sparsity-update strength parameter
reg = 1e-3             # regularization amount
threshold_value = 1e-2 # sparsity threshold (presumably small magnitudes are zeroed — see docs)
process = SparseInversion(γ, threshold_value, uc_idx, reg)
# Assemble the Ensemble Kalman Process from the initial ensemble, the target,
# the stabilization covariance, and the process type (`SparseInversion` for sparse EKI).
ensemble_kalman_process = EKP.EnsembleKalmanProcess(initial_ensemble, G_target, Γ_stabilization, process)
nothing # hide
# Then we calibrate by *(i)* obtaining the parameters, *(ii)* calculating the loss on
# the parameters (concatenating the per-member outputs column-wise), and *(iii)*
# generating a new set of parameters using the model outputs:
for i in 1:N_iterations
    params_i = get_u_final(ensemble_kalman_process)
    # Evaluate the forward map on every ensemble member. Use a distinct index `j`
    # here: the original comprehension reused `i`, shadowing the iteration counter.
    g_ens = hcat([G₁(params_i[:, j]) for j in 1:N_ensemble]...)
    EKP.update_ensemble!(ensemble_kalman_process, g_ens)
end
# and visualize the results:
u_init = get_u_prior(ensemble_kalman_process)
anim_unique_minimum = @animate for iter in 1:N_iterations
    u_iter = get_u(ensemble_kalman_process, iter)
    # Mark the known optimum with a red star.
    plot(
        [u★[1]],
        [u★[2]],
        seriestype = :scatter,
        markercolor = :red,
        markershape = :star5,
        markersize = 11,
        label = "optimum u⋆",
    )
    # Overlay the ensemble members at this iteration; axis limits are fixed by
    # the spread of the prior samples so all frames share the same view.
    plot!(
        u_iter[1, :],
        u_iter[2, :],
        seriestype = :scatter,
        markercolor = :blue,
        markersize = 5,
        markeralpha = 0.6,
        xlims = extrema(u_init[1, :]),
        ylims = extrema(u_init[2, :]),
        xlabel = "u₁",
        ylabel = "u₂",
        label = "particles",
        title = "EKI iteration = " * string(iter),
    )
end
nothing # hide
# The results show that the minimizer of ``G_1`` is ``u=u_*``.
gif(anim_unique_minimum, "unique_minimum_sparse.gif", fps = 1) # hide