Fix SimulatorSamplingScheme for deterministic nodes and update docs #710

Merged
merged 1 commit on Nov 13, 2023
7 changes: 6 additions & 1 deletion docs/src/tutorial/example_milk_producer.jl
@@ -161,8 +161,13 @@ model = SDDP.PolicyGraph(
x_stock.in + ω_production + u_spot_buy - x_forward[1].in - u_spot_sell
)
## The random variables. `price` comes from the Markov node
##
## !!! warning
## Each element in Ω MUST be a tuple with 1 or 2 values, where the first
## value is `price` and the second value is the random variable for the
## current node. If the node is deterministic, use Ω = [(price,)].
Ω = [(price, p) for p in Ω_production]
SDDP.parameterize(sp, Ω) do ω::Tuple{Float64,Float64}
SDDP.parameterize(sp, Ω) do ω
## Fix the ω_production variable
fix(ω_production, ω[2])
@stageobjective(
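For context, a minimal sketch (not part of this diff) of the two tuple shapes the new warning describes, assuming `sp`, `price`, `Ω_production`, and `ω_production` are defined as in the surrounding tutorial (each call belongs to a different node; a subproblem calls `SDDP.parameterize` at most once):

# A node with its own noise: each element of Ω is a 2-tuple (price, noise).
Ω = [(price, p) for p in Ω_production]
SDDP.parameterize(sp, Ω) do ω
    fix(ω_production, ω[2])
end

# A deterministic node: each element of Ω is a 1-tuple holding only the price.
SDDP.parameterize(sp, [(price,)]) do ω
    return nothing  # ω == (price,); there is no noise to fix
end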
10 changes: 6 additions & 4 deletions src/plugins/sampling_schemes.jl
@@ -481,8 +481,9 @@ which returns a `Vector{Float64}` when called with no arguments like
This sampling scheme must be used with a Markovian graph constructed from the
same `simulator`.

The sample space for [`SDDP.parameterize`](@ref) must be a tuple in which the
first element is the Markov state.
Each element of the sample space for [`SDDP.parameterize`](@ref) must be a
tuple with 1 or 2 values, where the first value is the Markov state and the
second value is the random variable for the current node. If the node is
deterministic, use `Ω = [(markov_state,)]`.

This sampling scheme generates a new scenario by calling `simulator()`, and then
picking the sequence of nodes in the Markovian graph that is closest to the new
@@ -508,7 +509,7 @@ julia> model = SDDP.PolicyGraph(
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@variable(sp, u >= 0)
@constraint(sp, x.out == x.in - u)
# Elements of Ω must be a tuple in which `markov_state` is the first
# Elements of Ω MUST be a tuple in which `markov_state` is the first
# element.
Ω = [(markov_state, (u = u_max,)) for u_max in (0.0, 0.5)]
SDDP.parameterize(sp, Ω) do (markov_state, ω)
@@ -559,7 +560,8 @@ function sample_scenario(
noise_terms = get_noise_terms(InSampleMonteCarlo(), node, node_index)
noise = sample_noise(noise_terms)
@assert noise[1] == node_index[2]
push!(scenario_path, (node_index, (value, noise[2])))
ω = length(noise) == 1 ? (value,) : (value, noise[2])
push!(scenario_path, (node_index, ω))
end
return scenario_path, false
end
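To illustrate the fix above, a standalone sketch (hypothetical values, not part of this diff) of how ω is now built for nodes with and without their own noise terms:

value = 49.9                         # hypothetical simulated value
for noise in [(49.9,), (49.9, 0.1)]  # deterministic node, then a node with noise
    ω = length(noise) == 1 ? (value,) : (value, noise[2])
    @show ω                          # prints (49.9,) then (49.9, 0.1)
end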
64 changes: 64 additions & 0 deletions test/plugins/sampling_schemes.jl
@@ -318,6 +318,70 @@ function test_OutOfSampleMonteCarlo_initial_node()
end
end

function test_SimulatorSamplingScheme()
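# Deterministic node: Ω elements are 1-tuples `(price,)`, so each sampled ω must also be a 1-tuple.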
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
direct_mode = false,
) do sp, node
t, price = node
@variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
SDDP.parameterize(sp, [(price,)]) do ω
return SDDP.@stageobjective(sp, price * x.out)
end
end
sampler = SDDP.SimulatorSamplingScheme(simulator)
scenario, _ = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 3
@test haskey(graph.nodes, scenario[1][1])
@test scenario[1][2] in ((40.0,), (50.1,), (59.6,))
return
end

function test_SimulatorSamplingScheme_with_noise()
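# Node with its own noise: Ω elements are 2-tuples `(price, i)`, so each sampled ω must be a 2-tuple.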
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
direct_mode = false,
) do sp, node
t, price = node
@variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
SDDP.parameterize(sp, [(price, i) for i in 1:2]) do ω
return SDDP.@stageobjective(sp, price * x.out + ω[2])
end
end
sampler = SDDP.SimulatorSamplingScheme(simulator)
scenario, _ = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 3
@test haskey(graph.nodes, scenario[1][1])
@test scenario[1][2] isa Tuple{Float64,Int}
@test scenario[1][2][1] in (40.0, 50.1, 59.6)
@test scenario[1][2][2] in 1:2
return
end

end # module

TestSamplingSchemes.runtests()
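Finally, a hedged usage sketch (not part of this diff) showing how the fixed sampling scheme is typically passed to training, assuming `model` and `simulator` are constructed as in the tests above:

SDDP.train(
    model;
    sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
    iteration_limit = 5,
)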