-
Notifications
You must be signed in to change notification settings - Fork 59
/
bellman_functions.jl
817 lines (764 loc) · 27 KB
/
bellman_functions.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
# Copyright (c) 2017-23, Oscar Dowson and SDDP.jl contributors.
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
Internal struct: a single affine cut, stored in intercept form so that the
model constraint is `theta [+ yᵀμ] - coefficientsᵀx >= intercept` when
minimizing (`<=` when maximizing); see `_add_cut_constraint_to_model`.
"""
mutable struct Cut
    intercept::Float64
    coefficients::Dict{Symbol,Float64}
    # Objective-state point associated with the cut, or `nothing` if the model
    # has no objective states.
    obj_y::Union{Nothing,NTuple{N,Float64} where {N}}
    # Belief-state point associated with the cut, or `nothing` if the model
    # has no belief states.
    belief_y::Union{Nothing,Dict{T,Float64} where {T}}
    # Number of sampled states at which this cut is the dominating cut; used
    # by `_cut_selection_update` to decide which cuts to delete.
    non_dominated_count::Int
    # Reference to the constraint in the JuMP model, or `nothing` if the cut
    # is not currently in the model.
    constraint_ref::Union{Nothing,JuMP.ConstraintRef}
end
"""
Internal struct: a state point at which cuts are compared during cut
selection, together with the cut that currently has the best height there.
"""
mutable struct SampledState
    state::Dict{Symbol,Float64}
    # Objective-state point at which this state was sampled (or `nothing`).
    obj_y::Union{Nothing,NTuple{N,Float64} where {N}}
    # Belief-state point at which this state was sampled (or `nothing`).
    belief_y::Union{Nothing,Dict{T,Float64} where {T}}
    # The cut with the best height at `state` (see `_cut_selection_update`).
    dominating_cut::Cut
    # Height of `dominating_cut` evaluated at `state`.
    best_objective::Float64
end
"""
Internal struct: a cutting-plane approximation of a convex function of the
state variables, represented by the epigraph variable `theta` and a collection
of cuts, plus the bookkeeping needed for cut selection.
"""
mutable struct ConvexApproximation
    # The epigraph variable bounded by the cuts.
    theta::JuMP.VariableRef
    # Outgoing state variables, keyed by state name.
    states::Dict{Symbol,JuMP.VariableRef}
    # Objective-state variables μ, or `nothing` if the model has none.
    objective_states::Union{Nothing,NTuple{N,JuMP.VariableRef} where {N}}
    # Belief-state variables μ, or `nothing` if the model has none.
    belief_states::Union{Nothing,Dict{T,JuMP.VariableRef} where {T}}
    # Storage for cut selection
    cuts::Vector{Cut}
    sampled_states::Vector{SampledState}
    # Queue of dominated cuts; they are removed from the JuMP model only when
    # at least `deletion_minimum` accumulate in one update (see
    # `_cut_selection_update`).
    cuts_to_be_deleted::Vector{Cut}
    deletion_minimum::Int
    function ConvexApproximation(
        theta::JuMP.VariableRef,
        states::Dict{Symbol,JuMP.VariableRef},
        objective_states,
        belief_states,
        deletion_minimum::Int,
    )
        # Start with no cuts or sampled states; they accumulate via
        # `_add_cut`.
        return new(
            theta,
            states,
            objective_states,
            belief_states,
            Cut[],
            SampledState[],
            Cut[],
            deletion_minimum,
        )
    end
end
# Internal function: the order of magnitude of `x`, i.e., `log10(|x|)`, with
# `0.0` for `x == 0`. The zero branch returns `0.0` (not integer `0`) so the
# function is type-stable: both branches return `Float64` for float input.
_magnitude(x) = abs(x) > 0 ? log10(abs(x)) : 0.0
"""
Internal function: warn (at most once) if the cut given by `intercept` and
`coefficients` mixes very small and very large magnitudes, which is a sign of
potential numerical trouble.

Tracks the smallest- and largest-magnitude terms among the intercept and all
coefficients, and warns when they differ by more than 10 orders of magnitude.
"""
function _dynamic_range_warning(intercept, coefficients)
    lo = hi = _magnitude(intercept)
    lo_v = hi_v = intercept
    for v in values(coefficients)
        i = _magnitude(v)
        # Compare magnitudes (`i` vs `lo`/`hi`), not signed values: a
        # coefficient like -1e20 is smaller than every positive value but has
        # a huge magnitude, and comparing raw values would mis-classify it as
        # the "small" extreme and suppress the warning.
        if i < lo
            lo, lo_v = i, v
        elseif i > hi
            hi, hi_v = i, v
        end
    end
    if hi - lo > 10
        @warn(
            """Found a cut with a mix of small and large coefficients.
            The order of magnitude difference is $(hi - lo).
            The smallest coefficient is $(lo_v).
            The largest coefficient is $(hi_v).
            You can ignore this warning, but it may be an indication of numerical issues.
            Consider rescaling your model by using different units, e.g, kilometers instead
            of meters. You should also consider reducing the accuracy of your input data (if
            you haven't already). For example, it probably doesn't make sense to measure the
            inflow into a reservoir to 10 decimal places.""",
            maxlog = 1,
        )
    end
    return
end
"""
Internal function: add a cut with height `θᵏ` and subgradient `πᵏ` at the
point `xᵏ` to the approximation `V`, optionally running cut selection.
"""
function _add_cut(
    V::ConvexApproximation,
    θᵏ::Float64,
    πᵏ::Dict{Symbol,Float64},
    xᵏ::Dict{Symbol,Float64},
    obj_y::Union{Nothing,NTuple{N,Float64}},
    belief_y::Union{Nothing,Dict{T,Float64}};
    cut_selection::Bool = true,
) where {N,T}
    # Convert from point-gradient form to intercept form: the stored
    # intercept is θᵏ - πᵏᵀxᵏ.
    for (state_name, state_value) in xᵏ
        θᵏ -= πᵏ[state_name] * state_value
    end
    _dynamic_range_warning(θᵏ, πᵏ)
    new_cut = Cut(θᵏ, πᵏ, obj_y, belief_y, 1, nothing)
    _add_cut_constraint_to_model(V, new_cut)
    cut_selection && _cut_selection_update(V, new_cut, xᵏ)
    return
end
"""
Internal function: add the constraint corresponding to `cut` to the JuMP model
that owns `V.theta`, storing the constraint reference in `cut.constraint_ref`.
"""
function _add_cut_constraint_to_model(V::ConvexApproximation, cut::Cut)
    model = JuMP.owner_model(V.theta)
    # Build the objective-state and belief-state term yᵀμ (stays zero if the
    # model has neither).
    yᵀμ = JuMP.AffExpr(0.0)
    if V.objective_states !== nothing
        for (y, μ) in zip(cut.obj_y, V.objective_states)
            JuMP.add_to_expression!(yᵀμ, y, μ)
        end
    end
    if V.belief_states !== nothing
        for (k, μ) in V.belief_states
            JuMP.add_to_expression!(yᵀμ, cut.belief_y[k], μ)
        end
    end
    expr = @expression(
        model,
        V.theta + yᵀμ - sum(cut.coefficients[i] * x for (i, x) in V.states)
    )
    # The direction of the inequality depends on the objective sense: cuts
    # bound θ from below when minimizing and from above when maximizing.
    cut.constraint_ref = if JuMP.objective_sense(model) == MOI.MIN_SENSE
        @constraint(model, expr >= cut.intercept)
    else
        @constraint(model, expr <= cut.intercept)
    end
    return
end
"""
Internal function: calculate the height of `cut` evaluated at `state`.
"""
function _eval_height(cut::Cut, sampled_state::SampledState)
height = cut.intercept
for (key, value) in cut.coefficients
height += value * sampled_state.state[key]
end
return height
end
"""
Internal function: check if the candidate point dominates the incumbent.
"""
function _dominates(candidate, incumbent, minimization::Bool)
return minimization ? candidate >= incumbent : candidate <= incumbent
end
"""
Internal function: update the cut-selection data structures of `V` given that
`cut` has just been discovered at the point `state`.

Each sampled state remembers the cut with the best height at that state, and
each cut counts the number of sampled states at which it dominates. Cuts that
dominate at no sampled state are queued for deletion, and are deleted from the
JuMP model once at least `V.deletion_minimum` of them accumulate in a single
update.
"""
function _cut_selection_update(
    V::ConvexApproximation,
    cut::Cut,
    state::Dict{Symbol,Float64},
)
    model = JuMP.owner_model(V.theta)
    is_minimization = JuMP.objective_sense(model) == MOI.MIN_SENSE
    # `NaN` is a placeholder; the true height is computed on the next line.
    sampled_state = SampledState(state, cut.obj_y, cut.belief_y, cut, NaN)
    sampled_state.best_objective = _eval_height(cut, sampled_state)
    # Loop through previously sampled states and compare the height of the most
    # recent cut against the current best. If this new cut is an improvement,
    # store this one instead.
    for old_state in V.sampled_states
        # Only compute cut selection at same points in concave space.
        if old_state.obj_y != cut.obj_y || old_state.belief_y != cut.belief_y
            continue
        end
        height = _eval_height(cut, old_state)
        if _dominates(height, old_state.best_objective, is_minimization)
            # Transfer one unit of domination count from the displaced cut.
            old_state.dominating_cut.non_dominated_count -= 1
            cut.non_dominated_count += 1
            old_state.dominating_cut = cut
            old_state.best_objective = height
        end
    end
    push!(V.sampled_states, sampled_state)
    # Now loop through previously discovered cuts and compare their height at
    # `sampled_state`. If a cut is an improvement, add it to a queue to be
    # added.
    for old_cut in V.cuts
        if old_cut.constraint_ref !== nothing
            # We only care about cuts not currently in the model.
            continue
        elseif old_cut.obj_y != sampled_state.obj_y
            # Only compute cut selection at same points in objective space.
            continue
        elseif old_cut.belief_y != sampled_state.belief_y
            # Only compute cut selection at same points in belief space.
            continue
        end
        height = _eval_height(old_cut, sampled_state)
        if _dominates(height, sampled_state.best_objective, is_minimization)
            sampled_state.dominating_cut.non_dominated_count -= 1
            old_cut.non_dominated_count += 1
            sampled_state.dominating_cut = old_cut
            sampled_state.best_objective = height
            # A previously deleted cut dominates here, so put it back into the
            # model.
            _add_cut_constraint_to_model(V, old_cut)
        end
    end
    push!(V.cuts, cut)
    # Delete cuts that need to be deleted.
    # NOTE: this loop variable shadows the `cut` argument.
    for cut in V.cuts
        if cut.non_dominated_count < 1
            if cut.constraint_ref !== nothing
                push!(V.cuts_to_be_deleted, cut)
            end
        end
    end
    if length(V.cuts_to_be_deleted) >= V.deletion_minimum
        for cut in V.cuts_to_be_deleted
            JuMP.delete(model, cut.constraint_ref)
            cut.constraint_ref = nothing
            cut.non_dominated_count = 0
        end
    end
    # The queue never persists across calls: if the deletion threshold was not
    # reached, the queued cuts stay in the model and the queue is rebuilt from
    # scratch on the next update.
    empty!(V.cuts_to_be_deleted)
    return
end
# Whether the cost-to-go is approximated by a single (average) cut aggregated
# over all realizations, or by one cut per realization (multi-cut).
@enum(CutType, SINGLE_CUT, MULTI_CUT)
# Internal struct: this struct is just a cache for arguments until we can build
# an actual instance of the type T at a later point.
#
# For example, `BellmanFunction(...)` returns an
# `InstanceFactory{BellmanFunction}`, which `initialize_bellman_function`
# later materializes once per node.
struct InstanceFactory{T}
    args::Any
    kwargs::Any
    InstanceFactory{T}(args...; kwargs...) where {T} = new{T}(args, kwargs)
end
"""
BellmanFunction
A representation of the value function. SDDP.jl uses the following unique
representation of the value function that is undocumented in the literature.
It supports three types of state variables:
1) x - convex "resource" states
2) b - concave "belief" states
3) y - concave "objective" states
In addition, we have three types of cuts:
1) Single-cuts (also called "average" cuts in the literature), which involve
the risk-adjusted expectation of the cost-to-go.
2) Multi-cuts, which use a different cost-to-go term for each realization w.
3) Risk-cuts, which correspond to the facets of the dual interpretation of a
convex risk measure.
Therefore, ValueFunction returns a JuMP model of the following form:
```
V(x, b, y) =
min: μᵀb + νᵀy + θ
s.t. # "Single" / "Average" cuts
μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J
# "Multi" cuts
μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K
# "Risk-set" cuts
θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K
```
"""
mutable struct BellmanFunction
cut_type::CutType
global_theta::ConvexApproximation
local_thetas::Vector{ConvexApproximation}
# Cuts defining the dual representation of the risk measure.
risk_set_cuts::Set{Vector{Float64}}
end
"""
BellmanFunction(;
lower_bound = -Inf,
upper_bound = Inf,
deletion_minimum::Int = 1,
cut_type::CutType = MULTI_CUT,
)
"""
function BellmanFunction(;
lower_bound = -Inf,
upper_bound = Inf,
deletion_minimum::Int = 1,
cut_type::CutType = MULTI_CUT,
)
return InstanceFactory{BellmanFunction}(
lower_bound = lower_bound,
upper_bound = upper_bound,
deletion_minimum = deletion_minimum,
cut_type = cut_type,
)
end
# Return the JuMP variable θ of the global cost-to-go approximation of
# `bellman_function`.
bellman_term(bellman_function::BellmanFunction) = bellman_function.global_theta.theta
"""
Internal function: materialize the `InstanceFactory` created by
[`BellmanFunction`](@ref) into an actual `BellmanFunction` for `node`,
creating the cost-to-go variable Θᴳ in `node.subproblem`.
"""
function initialize_bellman_function(
    factory::InstanceFactory{BellmanFunction},
    model::PolicyGraph{T},
    node::Node{T},
) where {T}
    # Defaults; overridden below by the keywords cached in `factory`.
    lower_bound, upper_bound, deletion_minimum, cut_type =
        -Inf, Inf, 0, SINGLE_CUT
    if length(factory.args) > 0
        error(
            "Positional arguments $(factory.args) ignored in BellmanFunction.",
        )
    end
    # Unpack the cached keyword arguments by name.
    for (kw, value) in factory.kwargs
        if kw == :lower_bound
            lower_bound = value
        elseif kw == :upper_bound
            upper_bound = value
        elseif kw == :deletion_minimum
            deletion_minimum = value
        elseif kw == :cut_type
            cut_type = value
        else
            error(
                "Keyword $(kw) not recognised as argument to BellmanFunction.",
            )
        end
    end
    if lower_bound == -Inf && upper_bound == Inf
        error("You must specify a finite bound on the cost-to-go term.")
    end
    # Nodes with no children have no cost-to-go, so fix Θᴳ to zero.
    if length(node.children) == 0
        lower_bound = upper_bound = 0.0
    end
    Θᴳ = @variable(node.subproblem)
    lower_bound > -Inf && JuMP.set_lower_bound(Θᴳ, lower_bound)
    upper_bound < Inf && JuMP.set_upper_bound(Θᴳ, upper_bound)
    # Initialize bounds for the objective states. If objective_state==nothing,
    # this check will be skipped by dispatch.
    _add_initial_bounds(node.objective_state, Θᴳ)
    # The approximation is over the outgoing (`.out`) state variables.
    x′ = Dict(key => var.out for (key, var) in node.states)
    obj_μ = node.objective_state !== nothing ? node.objective_state.μ : nothing
    belief_μ = node.belief_state !== nothing ? node.belief_state.μ : nothing
    return BellmanFunction(
        cut_type,
        ConvexApproximation(Θᴳ, x′, obj_μ, belief_μ, deletion_minimum),
        ConvexApproximation[],
        Set{Vector{Float64}}(),
    )
end
# Internal function: helper used in _add_initial_bounds.
#
# Adds the constraint(s) bounding `<y, μ> + θ` by the bounds already set on
# `theta`, for the fixed objective-state point `y`.
function _add_objective_state_constraint(
    theta::JuMP.VariableRef,
    y::NTuple{N,Float64},
    μ::NTuple{N,JuMP.VariableRef},
) where {N}
    # Infinite components of `y` are excluded from the sum below.
    is_finite = [-Inf < y[i] < Inf for i in 1:N]
    model = JuMP.owner_model(theta)
    lower_bound = JuMP.has_lower_bound(theta) ? JuMP.lower_bound(theta) : -Inf
    upper_bound = JuMP.has_upper_bound(theta) ? JuMP.upper_bound(theta) : Inf
    if lower_bound ≈ upper_bound ≈ 0.0
        # θ is fixed at zero (e.g., a node with no children), so the μ
        # variables must also be zero.
        @constraint(model, [i = 1:N], μ[i] == 0.0)
        return
    end
    expr = @expression(
        model,
        sum(y[i] * μ[i] for i in 1:N if is_finite[i]) + theta
    )
    if lower_bound > -Inf
        @constraint(model, expr >= lower_bound)
    end
    if upper_bound < Inf
        @constraint(model, expr <= upper_bound)
    end
    return
end
# Internal function: When created, θ has bounds of [-M, M], but, since we are
# adding these μ terms, we really want to bound <y, μ> + θ ∈ [-M, M]. We need to
# consider all possible values for `y`. Because the domain of `y` is
# rectangular, we want to add a constraint at each extreme point. This involves
# adding 2^N constraints where N = |μ|. This is only feasible for
# low-dimensional problems, e.g., N < 5.
_add_initial_bounds(::Nothing, ::Any) = nothing
function _add_initial_bounds(obj_state::ObjectiveState, theta)
    if length(obj_state.μ) < 5
        # Low-dimensional case: constrain every extreme point of the
        # rectangular domain (the 2^N lower/upper combinations).
        for y in
            Base.product(zip(obj_state.lower_bound, obj_state.upper_bound)...)
            _add_objective_state_constraint(theta, y, obj_state.μ)
        end
    else
        # Too many extreme points: only constrain the two corners given by
        # the component-wise lower and upper bounds.
        _add_objective_state_constraint(
            theta,
            obj_state.lower_bound,
            obj_state.μ,
        )
        _add_objective_state_constraint(
            theta,
            obj_state.upper_bound,
            obj_state.μ,
        )
    end
    return
end
"""
Internal function: refine the Bellman function at `node` given one dual
vector, noise support, nominal probability, and objective realization per
outcome, adding either a single (average) cut or a multi-cut depending on
`bellman_function.cut_type`.
"""
function refine_bellman_function(
    model::PolicyGraph{T},
    node::Node{T},
    bellman_function::BellmanFunction,
    risk_measure::AbstractRiskMeasure,
    outgoing_state::Dict{Symbol,Float64},
    dual_variables::Vector{Dict{Symbol,Float64}},
    noise_supports::Vector,
    nominal_probability::Vector{Float64},
    objective_realizations::Vector{Float64},
) where {T}
    # Sanity checks.
    @assert length(dual_variables) ==
            length(noise_supports) ==
            length(nominal_probability) ==
            length(objective_realizations)
    # Preliminaries that are common to all cut types.
    risk_adjusted_probability = similar(nominal_probability)
    # `adjust_probability` fills `risk_adjusted_probability` in-place and
    # returns an offset term that is added to the cut intercept.
    offset = adjust_probability(
        risk_measure,
        risk_adjusted_probability,
        nominal_probability,
        noise_supports,
        objective_realizations,
        model.objective_sense == MOI.MIN_SENSE,
    )
    # The meat of the function.
    if bellman_function.cut_type == SINGLE_CUT
        return _add_average_cut(
            node,
            outgoing_state,
            risk_adjusted_probability,
            objective_realizations,
            dual_variables,
            offset,
        )
    else # Add a multi-cut
        @assert bellman_function.cut_type == MULTI_CUT
        _add_locals_if_necessary(node, bellman_function, length(dual_variables))
        return _add_multi_cut(
            node,
            outgoing_state,
            risk_adjusted_probability,
            objective_realizations,
            dual_variables,
            offset,
        )
    end
end
"""
Internal function: build a single (average) cut by aggregating the
realization-wise objectives and duals under the risk-adjusted probabilities,
and add it to the node's global approximation.

Returns a named tuple `(theta, pi, x, obj_y, belief_y)` describing the cut in
point-gradient form at `outgoing_state`.
"""
function _add_average_cut(
    node::Node,
    outgoing_state::Dict{Symbol,Float64},
    risk_adjusted_probability::Vector{Float64},
    objective_realizations::Vector{Float64},
    dual_variables::Vector{Dict{Symbol,Float64}},
    offset::Float64,
)
    N = length(risk_adjusted_probability)
    @assert N == length(objective_realizations) == length(dual_variables)
    # Aggregate the intercept and the subgradient across realizations using
    # the risk-adjusted probability distribution.
    θᵏ = offset
    πᵏ = Dict(key => 0.0 for key in keys(outgoing_state))
    for (p, objective, duals) in
        zip(risk_adjusted_probability, objective_realizations, dual_variables)
        θᵏ += p * objective
        for (key, dual) in duals
            πᵏ[key] += p * dual
        end
    end
    # Record the objective-state and belief-state points (if they exist), and
    # add the average-cut to the subproblem.
    obj_y =
        isnothing(node.objective_state) ? nothing : node.objective_state.state
    belief_y = isnothing(node.belief_state) ? nothing : node.belief_state.belief
    _add_cut(
        node.bellman_function.global_theta,
        θᵏ,
        πᵏ,
        outgoing_state,
        obj_y,
        belief_y,
    )
    return (
        theta = θᵏ,
        pi = πᵏ,
        x = outgoing_state,
        obj_y = obj_y,
        belief_y = belief_y,
    )
end
"""
Internal function: add a multi-cut to `node`: one cut per realization bounding
the corresponding local θ variable, plus (if not already present) a risk-set
cut linking the global θ to the risk-adjusted combination of the local θs.
"""
function _add_multi_cut(
    node::Node,
    outgoing_state::Dict{Symbol,Float64},
    risk_adjusted_probability::Vector{Float64},
    objective_realizations::Vector{Float64},
    dual_variables::Vector{Dict{Symbol,Float64}},
    offset::Float64,
)
    N = length(risk_adjusted_probability)
    @assert N == length(objective_realizations) == length(dual_variables)
    bellman_function = node.bellman_function
    # μᵀy collects the objective-state and belief-state components.
    μᵀy = get_objective_state_component(node)
    JuMP.add_to_expression!(μᵀy, get_belief_state_component(node))
    # One cut per realization on the corresponding local approximation.
    for i in 1:length(dual_variables)
        _add_cut(
            bellman_function.local_thetas[i],
            objective_realizations[i],
            dual_variables[i],
            outgoing_state,
            node.objective_state === nothing ? nothing :
            node.objective_state.state,
            node.belief_state === nothing ? nothing : node.belief_state.belief,
        )
    end
    model = JuMP.owner_model(bellman_function.global_theta.theta)
    cut_expr = @expression(
        model,
        sum(
            risk_adjusted_probability[i] *
            bellman_function.local_thetas[i].theta for i in 1:N
        ) - (1 - sum(risk_adjusted_probability)) * μᵀy + offset
    )
    # TODO(odow): should we use `cut_expr` instead?
    ξ = copy(risk_adjusted_probability)
    # Skip a duplicate risk-set cut, unless there is a μᵀy term, in which case
    # the constraint depends on more than ξ alone.
    if !(ξ in bellman_function.risk_set_cuts) || μᵀy != JuMP.AffExpr(0.0)
        push!(bellman_function.risk_set_cuts, ξ)
        if JuMP.objective_sense(model) == MOI.MIN_SENSE
            @constraint(model, bellman_function.global_theta.theta >= cut_expr)
        else
            @constraint(model, bellman_function.global_theta.theta <= cut_expr)
        end
    end
    return
end
# If we are adding a multi-cut for the first time, then the local θ variables
# won't have been added.
# TODO(odow): a way to set different bounds for each variable in the multi-cut.
"""
Internal function: create the `N` local θ variables and their
`ConvexApproximation`s for `node`, if they have not been created yet.
"""
function _add_locals_if_necessary(
    node::Node,
    bellman_function::BellmanFunction,
    N::Int,
)
    num_local_thetas = length(bellman_function.local_thetas)
    if num_local_thetas == N
        return # Do nothing. Already initialized.
    elseif num_local_thetas > 0
        # A different number of local θs exists already: the realization count
        # must match the count used when they were first created.
        error(
            "Expected $(N) local θ variables but there were " *
            "$(num_local_thetas).",
        )
    end
    global_theta = bellman_function.global_theta
    model = JuMP.owner_model(global_theta.theta)
    local_thetas = @variable(model, [1:N])
    # Copy any bounds on the global θ to each local θ.
    if JuMP.has_lower_bound(global_theta.theta)
        JuMP.set_lower_bound.(
            local_thetas,
            JuMP.lower_bound(global_theta.theta),
        )
    end
    if JuMP.has_upper_bound(global_theta.theta)
        JuMP.set_upper_bound.(
            local_thetas,
            JuMP.upper_bound(global_theta.theta),
        )
    end
    # Each local θ shares the global approximation's states and settings.
    for local_theta in local_thetas
        push!(
            bellman_function.local_thetas,
            ConvexApproximation(
                local_theta,
                global_theta.states,
                node.objective_state === nothing ? nothing :
                node.objective_state.μ,
                node.belief_state === nothing ? nothing : node.belief_state.μ,
                global_theta.deletion_minimum,
            ),
        )
    end
    return
end
"""
write_cuts_to_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = string,
) where {T}
Write the cuts that form the policy in `model` to `filename` in JSON format.
`node_name_parser` is a function which converts the name of each node into a
string representation. It has the signature: `node_name_parser(::T)::String`.
See also [`SDDP.read_cuts_from_file`](@ref).
"""
function write_cuts_to_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = string,
) where {T}
cuts = Dict{String,Any}[]
for (node_name, node) in model.nodes
if node.objective_state !== nothing || node.belief_state !== nothing
error(
"Unable to write cuts to file because model contains " *
"objective states or belief states.",
)
end
node_cuts = Dict(
"node" => node_name_parser(node_name),
"single_cuts" => Dict{String,Any}[],
"multi_cuts" => Dict{String,Any}[],
"risk_set_cuts" => Vector{Float64}[],
)
oracle = node.bellman_function.global_theta
for (cut, state) in zip(oracle.cuts, oracle.sampled_states)
intercept = cut.intercept
for (key, π) in cut.coefficients
intercept += π * state.state[key]
end
push!(
node_cuts["single_cuts"],
Dict(
"intercept" => intercept,
"coefficients" => copy(cut.coefficients),
"state" => copy(state.state),
),
)
end
for (i, theta) in enumerate(node.bellman_function.local_thetas)
for (cut, state) in zip(theta.cuts, theta.sampled_states)
intercept = cut.intercept
for (key, π) in cut.coefficients
intercept += π * state.state[key]
end
push!(
node_cuts["multi_cuts"],
Dict(
"realization" => i,
"intercept" => intercept,
"coefficients" => copy(cut.coefficients),
"state" => copy(state.state),
),
)
end
end
for p in node.bellman_function.risk_set_cuts
push!(node_cuts["risk_set_cuts"], p)
end
push!(cuts, node_cuts)
end
open(filename, "w") do io
return write(io, JSON.json(cuts))
end
return
end
# Parsers used by `read_cuts_from_file` to recover node names from the string
# representation stored in the JSON file.
_node_name_parser(::Type{Int}, name::String) = parse(Int, name)
_node_name_parser(::Type{Symbol}, name::String) = Symbol(name)
function _node_name_parser(::Type{NTuple{N,Int}}, name::String) where {N}
    # Drop the surrounding parentheses, then parse the comma-separated items.
    items = split(name[2:end-1], ",")
    elements = [parse(Int, strip(item)) for item in items]
    if length(elements) != N
        error("Unable to parse node called $(name). Expected $N elements.")
    end
    return tuple(elements...)
end
# Fallback: unknown node types require a user-supplied parser.
function _node_name_parser(::Any, name)
    return error(
        "Unable to read name $(name). Provide a custom parser to " *
        "`read_cuts_from_file` using the `node_name_parser` keyword.",
    )
end
"""
read_cuts_from_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = _node_name_parser,
) where {T}
Read cuts (saved using [`SDDP.write_cuts_to_file`](@ref)) from `filename` into
`model`.
Since `T` can be an arbitrary Julia type, the conversion to JSON is lossy. When
reading, `read_cuts_from_file` only supports `T=Int`, `T=NTuple{N, Int}`, and
`T=Symbol`. If you have manually created a policy graph with a different node
type `T`, provide a function `node_name_parser` with the signature
`node_name_parser(T, name::String)::T where {T}` that returns the name of each
node given the string name `name`.
If `node_name_parser` returns `nothing`, those cuts are skipped.
See also [`SDDP.write_cuts_to_file`](@ref).
"""
function read_cuts_from_file(
model::PolicyGraph{T},
filename::String;
node_name_parser::Function = _node_name_parser,
) where {T}
cuts = JSON.parsefile(filename, use_mmap = false)
for node_cuts in cuts
node_name = node_name_parser(T, node_cuts["node"])::Union{Nothing,T}
if node_name === nothing
continue # Skip reading these cuts
end
node = model[node_name]
bf = node.bellman_function
# Loop through and add the single-cuts.
for json_cut in node_cuts["single_cuts"]
has_state = haskey(json_cut, "state")
state = if has_state
Dict(Symbol(k) => v for (k, v) in json_cut["state"])
else
Dict(Symbol(k) => 0.0 for k in keys(json_cut["coefficients"]))
end
_add_cut(
bf.global_theta,
json_cut["intercept"],
Dict(Symbol(k) => v for (k, v) in json_cut["coefficients"]),
state,
nothing,
nothing;
cut_selection = has_state,
)
end
# Loop through and add the multi-cuts. There are two parts:
# (i) the cuts w.r.t. the state variable x
# (ii) the cuts that define the risk set
# There is one additional complication: if these cuts are being read
# into a new model, the local theta variables may not exist yet.
if length(node_cuts["risk_set_cuts"]) > 0
_add_locals_if_necessary(
node,
bf,
length(first(node_cuts["risk_set_cuts"])),
)
end
for json_cut in node_cuts["multi_cuts"]
has_state = haskey(json_cut, "state")
state = if has_state
Dict(Symbol(k) => v for (k, v) in json_cut["state"])
else
Dict(Symbol(k) => 0.0 for k in keys(json_cut["coefficients"]))
end
_add_cut(
bf.local_thetas[json_cut["realization"]],
json_cut["intercept"],
Dict(Symbol(k) => v for (k, v) in json_cut["coefficients"]),
state,
nothing,
nothing;
cut_selection = has_state,
)
end
# Here is part (ii): adding the constraints that define the risk-set
# representation of the risk measure.
for json_cut in node_cuts["risk_set_cuts"]
expr = @expression(
node.subproblem,
bf.global_theta.theta - sum(
p * V.theta for (p, V) in zip(json_cut, bf.local_thetas)
)
)
if JuMP.objective_sense(node.subproblem) == MOI.MIN_SENSE
@constraint(node.subproblem, expr >= 0)
else
@constraint(node.subproblem, expr <= 0)
end
end
end
return
end
"""
add_all_cuts(model::PolicyGraph)
Add all cuts that may have been deleted back into the model.
## Explanation
During the solve, SDDP.jl may decide to remove cuts for a variety of reasons.
These can include cuts that define the optimal value function, particularly
around the extremes of the state-space (e.g., reservoirs empty).
This function ensures that all cuts discovered are added back into the model.
You should call this after [`train`](@ref) and before [`simulate`](@ref).
"""
function add_all_cuts(model::PolicyGraph)
for node in values(model.nodes)
global_theta = node.bellman_function.global_theta
for cut in global_theta.cuts
if cut.constraint_ref === nothing
_add_cut_constraint_to_model(global_theta, cut)
end
end
for approximation in node.bellman_function.local_thetas
for cut in approximation.cuts
if cut.constraint_ref === nothing
_add_cut_constraint_to_model(approximation, cut)
end
end
end
end
return
end