From 39be1cb4a0dff703d6ee4c8d98502a6a38d91380 Mon Sep 17 00:00:00 2001
From: odow
Date: Tue, 14 Nov 2023 10:37:51 +1300
Subject: [PATCH] Fix SimulatorSamplingScheme for deterministic nodes and update docs

---
 docs/src/tutorial/example_milk_producer.jl |  7 ++-
 src/plugins/sampling_schemes.jl            | 10 ++--
 test/plugins/sampling_schemes.jl           | 64 ++++++++++++++++++++++
 3 files changed, 76 insertions(+), 5 deletions(-)

diff --git a/docs/src/tutorial/example_milk_producer.jl b/docs/src/tutorial/example_milk_producer.jl
index ea305ca99..b76b74956 100644
--- a/docs/src/tutorial/example_milk_producer.jl
+++ b/docs/src/tutorial/example_milk_producer.jl
@@ -161,8 +161,13 @@ model = SDDP.PolicyGraph(
         x_stock.in + ω_production + u_spot_buy - x_forward[1].in - u_spot_sell
     )
     ## The random variables. `price` comes from the Markov node
+    ##
+    ## !!! warning
+    ##     The elements in Ω MUST be a tuple with 1 or 2 values, where the first
+    ##     value is `price` and the second value is the random variable for the
+    ##     current node. If the node is deterministic, use `Ω = [(price,)]`.
     Ω = [(price, p) for p in Ω_production]
-    SDDP.parameterize(sp, Ω) do ω::Tuple{Float64,Float64}
+    SDDP.parameterize(sp, Ω) do ω
         ## Fix the ω_production variable
         fix(ω_production, ω[2])
         @stageobjective(
diff --git a/src/plugins/sampling_schemes.jl b/src/plugins/sampling_schemes.jl
index bbf18c409..f3ee37195 100644
--- a/src/plugins/sampling_schemes.jl
+++ b/src/plugins/sampling_schemes.jl
@@ -481,8 +481,9 @@ which returns a `Vector{Float64}` when called with no arguments like
 This sampling scheme must be used with a Markovian graph constructed from the
 same `simulator`.
 
-The sample space for [`SDDP.parameterize`](@ref) must be a tuple in which the
-first element is the Markov state.
+The sample space for [`SDDP.parameterize`](@ref) must be a tuple with 1 or 2
+values, where the first value is the Markov state and the second is the random
+variable for the current node. If the node is deterministic, use `Ω = [(markov_state,)]`.
 
 This sampling scheme generates a new scenario by calling `simulator()`, and then
 picking the sequence of nodes in the Markovian graph that is closest to the new
@@ -508,7 +509,7 @@ julia> model = SDDP.PolicyGraph(
            @variable(sp, x >= 0, SDDP.State, initial_value = 1)
            @variable(sp, u >= 0)
            @constraint(sp, x.out == x.in - u)
-           # Elements of Ω must be a tuple in which `markov_state` is the first
+           # Elements of Ω MUST be a tuple in which `markov_state` is the first
            # element.
            Ω = [(markov_state, (u = u_max,)) for u_max in (0.0, 0.5)]
            SDDP.parameterize(sp, Ω) do (markov_state, ω)
@@ -559,7 +560,8 @@ function sample_scenario(
         noise_terms = get_noise_terms(InSampleMonteCarlo(), node, node_index)
         noise = sample_noise(noise_terms)
         @assert noise[1] == node_index[2]
-        push!(scenario_path, (node_index, (value, noise[2])))
+        ω = length(noise) == 1 ? (value,) : (value, noise[2])
+        push!(scenario_path, (node_index, ω))
     end
     return scenario_path, false
 end
diff --git a/test/plugins/sampling_schemes.jl b/test/plugins/sampling_schemes.jl
index c0ac485e1..4e4352179 100644
--- a/test/plugins/sampling_schemes.jl
+++ b/test/plugins/sampling_schemes.jl
@@ -318,6 +318,70 @@ function test_OutOfSampleMonteCarlo_initial_node()
     end
 end
 
+function test_SimulatorSamplingScheme()
+    function simulator()
+        inflow = zeros(3)
+        current = 50.0
+        Ω = [-10.0, 0.1, 9.6]
+        for t in 1:3
+            current += rand(Ω)
+            inflow[t] = current
+        end
+        return inflow
+    end
+    graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
+    model = SDDP.PolicyGraph(
+        graph,
+        lower_bound = 0.0,
+        direct_mode = false,
+    ) do sp, node
+        t, price = node
+        @variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
+        SDDP.parameterize(sp, [(price,)]) do ω
+            return SDDP.@stageobjective(sp, price * x.out)
+        end
+    end
+    sampler = SDDP.SimulatorSamplingScheme(simulator)
+    scenario, _ = SDDP.sample_scenario(model, sampler)
+    @test length(scenario) == 3
+    @test haskey(graph.nodes, scenario[1][1])
+    @test scenario[1][2] in ((40.0,), (50.1,), (59.6,))
+    return
+end
+
+function test_SimulatorSamplingScheme_with_noise()
+    function simulator()
+        inflow = zeros(3)
+        current = 50.0
+        Ω = [-10.0, 0.1, 9.6]
+        for t in 1:3
+            current += rand(Ω)
+            inflow[t] = current
+        end
+        return inflow
+    end
+    graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
+    model = SDDP.PolicyGraph(
+        graph,
+        lower_bound = 0.0,
+        direct_mode = false,
+    ) do sp, node
+        t, price = node
+        @variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
+        SDDP.parameterize(sp, [(price, i) for i in 1:2]) do ω
+            return SDDP.@stageobjective(sp, price * x.out + ω[2])
+        end
+    end
+    sampler = SDDP.SimulatorSamplingScheme(simulator)
+    scenario, _ = SDDP.sample_scenario(model, sampler)
+    @test length(scenario) == 3
+    @test haskey(graph.nodes, scenario[1][1])
+    @test scenario[1][2] isa Tuple{Float64,Int}
+    @test scenario[1][2][1] in (40.0, 50.1, 59.6)
+    @test scenario[1][2][2] in 1:2
+    return
+end
+
 end  # module
 
 TestSamplingSchemes.runtests()