Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adaptive exponential Rosenbrock integrators #463

Merged
merged 16 commits into from
Aug 6, 2018
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/OrdinaryDiffEq.jl
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ module OrdinaryDiffEq
export GenericIIF1, GenericIIF2

export LawsonEuler, NorsettEuler, ETD1, ETDRK2, ETDRK3, ETDRK4, HochOst4, Exp4, EPIRK4s3A, EPIRK4s3B,
EPIRK5s3, EXPRB53s3, EPIRK5P1, EPIRK5P2, ETD2
EPIRK5s3, EXPRB53s3, EPIRK5P1, EPIRK5P2, ETD2, Exprb32

export SymplecticEuler, VelocityVerlet, VerletLeapfrog, PseudoVerletLeapfrog,
McAte2, Ruth3, McAte3, CandyRoz4, McAte4, McAte42, McAte5,
Expand Down
2 changes: 2 additions & 0 deletions src/alg_utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ alg_order(alg::EPIRK5P2) = 5
alg_order(alg::EXPRB53s3) = 5
alg_order(alg::SplitEuler) = 1
alg_order(alg::ETD2) = 2
alg_order(alg::Exprb32) = 3 # 3rd-order exponential Rosenbrock method
alg_order(alg::Anas5) = 5

alg_order(alg::SymplecticEuler) = 1
Expand Down Expand Up @@ -276,6 +277,7 @@ alg_adaptive_order(alg::ImplicitMidpoint) = 1
# this is actually incorrect and is purposefully decreased as this tends
# to track the real error much better

alg_adaptive_order(alg::Exprb32) = 2 # order of the embedded error-estimator method
alg_adaptive_order(alg::AN5) = 5

beta2_default(alg::OrdinaryDiffEqAlgorithm) = 2//(5alg_order(alg))
Expand Down
5 changes: 5 additions & 0 deletions src/algorithms.jl
Original file line number Diff line number Diff line change
Expand Up @@ -837,6 +837,11 @@ for Alg in [:Exp4, :EPIRK4s3A, :EPIRK4s3B, :EPIRK5s3, :EXPRB53s3, :EPIRK5P1, :EP
end
struct SplitEuler <: OrdinaryDiffEqExponentialAlgorithm end
struct ETD2 <: OrdinaryDiffEqExponentialAlgorithm end
"""
    Exprb32(; m=30, iop=0)

Adaptive exponential Rosenbrock integrator of order 3 with an embedded
order-2 error estimator.

- `m`: upper bound on the Krylov subspace dimension (the actual dimension
  is capped at the problem size; see `alg_cache`).
- `iop`: length parameter forwarded to the Krylov routines' `iop` keyword;
  `0` appears to mean full orthogonalization — TODO confirm against the
  `arnoldi!`/`phiv` documentation.
"""
struct Exprb32 <: OrdinaryDiffEqAdaptiveExponentialAlgorithm
m::Int
iop::Int
end
Exprb32(;m=30, iop=0) = Exprb32(m, iop)

#########################################

Expand Down
30 changes: 30 additions & 0 deletions src/caches/linear_nonlinear_caches.jl
Original file line number Diff line number Diff line change
Expand Up @@ -543,6 +543,36 @@ function alg_cache(alg::EPIRK5P2,u,rate_prototype,uEltypeNoUnits,uBottomEltypeNo
EPIRK5P2Cache(u,uprev,tmp,rtmp,rtmp2,dR,K,J,B,KsCache)
end

####################################
# Adaptive exponential Rosenbrock method caches
# Out-of-place cache for Exprb32. The constant-cache path allocates its
# intermediates per step, so no storage needs to be carried between steps.
struct Exprb32ConstantCache <: OrdinaryDiffEqConstantCache end
function alg_cache(alg::Exprb32,u,rate_prototype,uEltypeNoUnits,uBottomEltypeNoUnits,
                   tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,p,calck,::Type{Val{false}})
  return Exprb32ConstantCache()
end

# In-place (mutable) cache for Exprb32.
struct Exprb32Cache{uType,rateType,JType,KsType} <: OrdinaryDiffEqMutableCache
u::uType          # current state
uprev::uType      # previous state
utilde::uType     # storage for the embedded error estimate
tmp::uType        # uType scratch: stage value, arnoldi! cache, residuals
rtmp::rateType    # rateType scratch: nonlinear evaluations, A*uprev
F2::rateType      # right-hand side of the second stage
J::JType          # Jacobian storage (`nothing` when f is a SplitFunction)
KsCache::KsType   # tuple (KrylovSubspace, PhivCache, [w1, w2])
end
# Build the in-place cache for Exprb32: two uType scratch arrays, two
# rateType scratch arrays, Jacobian storage, and the Krylov workspaces.
function alg_cache(alg::Exprb32,u,rate_prototype,uEltypeNoUnits,uBottomEltypeNoUnits,tTypeNoUnits,uprev,uprev2,f,t,dt,reltol,p,calck,::Type{Val{true}})
  # uType caches
  utilde = similar(u)
  tmp = similar(u)
  # rateType caches
  rtmp = zero(rate_prototype)
  F2 = zero(rate_prototype)
  # A SplitFunction carries its linear operator directly (f.f1), so no
  # Jacobian storage is needed in that case.
  J = isa(f, SplitFunction) ? nothing : deepcopy(f.jac_prototype)
  # Krylov-related caches; the subspace dimension cannot exceed the problem size
  T = eltype(u)
  n = length(u)
  mkrylov = min(alg.m, n)
  Ks = KrylovSubspace{T}(n, mkrylov)
  phiv_cache = PhivCache{T}(mkrylov, 3)
  # one n-by-4 phi-vector workspace per stage (phi_0 .. phi_3)
  ws = [Matrix{T}(undef, n, 4), Matrix{T}(undef, n, 4)]
  KsCache = (Ks, phiv_cache, ws)
  Exprb32Cache(u,uprev,utilde,tmp,rtmp,F2,J,KsCache)
end

####################################
# Multistep exponential method caches

Expand Down
86 changes: 86 additions & 0 deletions src/perform_step/exponential_rk_perform_step.jl
Original file line number Diff line number Diff line change
Expand Up @@ -1158,6 +1158,92 @@ function perform_step!(integrator, cache::EPIRK5P2Cache, repeat_step=false)
# integrator.k is automatically set due to aliasing
end

######################################################
# Adaptive exponential Rosenbrock integrators
function initialize!(integrator, cache::Exprb32ConstantCache)
  # FSAL: evaluate f at the left endpoint once up front
  fsal = integrator.f(integrator.uprev, integrator.p, integrator.t)
  integrator.fsalfirst = fsal
  integrator.fsallast = zero(fsal)

  # Set up the two interpolation derivative slots
  integrator.kshortsize = 2
  k = typeof(integrator.k)(undef, integrator.kshortsize)
  k[1] = integrator.fsalfirst
  k[2] = integrator.fsallast
  integrator.k = k
end
function initialize!(integrator, cache::Exprb32Cache)
  # FSAL: evaluate f in-place at the left endpoint once up front
  fsal = zero(cache.rtmp)
  integrator.f(fsal, integrator.uprev, integrator.p, integrator.t)
  integrator.fsalfirst = fsal
  integrator.fsallast = zero(fsal)

  # Set up the two interpolation derivative slots (aliased to the fsal fields)
  integrator.kshortsize = 2
  resize!(integrator.k, integrator.kshortsize)
  integrator.k[1] = integrator.fsalfirst
  integrator.k[2] = integrator.fsallast
end

function perform_step!(integrator, cache::Exprb32ConstantCache, repeat_step=false)
  @unpack t,dt,uprev,f,p = integrator
  is_compos = isa(integrator.alg, CompositeAlgorithm)
  A = isa(f, SplitFunction) ? f.f1 : calc_J!(integrator,cache,is_compos) # get linear operator
  alg = unwrap_alg(integrator, true)

  # Stage 1: phi-vectors for F1 = f(uprev)
  F1 = integrator.fsalfirst
  w1 = phiv(dt, A, F1, 3; m=min(alg.m, size(A,1)), opnorm=integrator.opts.internalopnorm, iop=alg.iop)
  # Stage 2: U2 = uprev + dt*phi1(dt*A)*F1, then its (shifted) right-hand side
  U2 = uprev + dt * w1[:, 2]
  F2 = _compute_nl(f, U2, p, t + dt, A) + A * uprev
  w2 = phiv(dt, A, F2, 3; m=min(alg.m, size(A,1)), opnorm=integrator.opts.internalopnorm, iop=alg.iop)
  # 3rd-order solution
  u = uprev + dt * (w1[:,2] - 2w1[:,4] + 2w2[:,4])
  if integrator.opts.adaptive
    # Error estimate: with the embedded 2nd-order method uhat = uprev + dt*w1[:,2],
    # the estimator is u - uhat = 2dt*(w2[:,4] - w1[:,4]). Forming the difference
    # directly (instead of constructing uhat) is more numerically stable.
    utilde = 2dt * (w2[:,4] - w1[:,4])
    atmp = calculate_residuals(utilde, uprev, u, integrator.opts.abstol, integrator.opts.reltol,integrator.opts.internalnorm)
    integrator.EEst = integrator.opts.internalnorm(atmp)
  end

  # Update integrator state
  integrator.fsallast = f(u, p, t + dt)
  integrator.k[1] = integrator.fsalfirst
  integrator.k[2] = integrator.fsallast
  integrator.u = u
end

function perform_step!(integrator, cache::Exprb32Cache, repeat_step=false)
  @unpack t,dt,uprev,u,f,p = integrator
  @unpack utilde,tmp,rtmp,F2,J,KsCache = cache
  is_compos = isa(integrator.alg, CompositeAlgorithm)
  A = isa(f, SplitFunction) ? f.f1 : (calc_J!(integrator,cache,is_compos); J) # get linear operator
  alg = unwrap_alg(integrator, true)

  F1 = integrator.fsalfirst
  Ks, phiv_cache, ws = KsCache
  w1, w2 = ws
  # Krylov phi-vectors for F1
  arnoldi!(Ks, A, F1; m=min(alg.m, size(A,1)), opnorm=integrator.opts.internalopnorm, cache=tmp, iop=alg.iop)
  phiv!(w1, dt, Ks, 3; cache=phiv_cache)
  # Stage 2: tmp <- U2 = uprev + dt*phi1(dt*A)*F1, then F2 <- nl(U2) + A*uprev
  @muladd @. tmp = uprev + dt * @view(w1[:, 2])
  _compute_nl!(F2, f, tmp, p, t + dt, A, rtmp)
  F2 .+= mul!(rtmp, A, uprev)
  arnoldi!(Ks, A, F2; m=min(alg.m, size(A,1)), opnorm=integrator.opts.internalopnorm, cache=tmp, iop=alg.iop)
  phiv!(w2, dt, Ks, 3; cache=phiv_cache)
  # 3rd-order solution: u = uprev + dt*(w1[:,2] - 2w1[:,4] + 2w2[:,4])
  u .= uprev
  axpy!(dt, @view(w1[:,2]), u)
  axpy!(-2dt, @view(w1[:,4]), u)
  axpy!(2dt, @view(w2[:,4]), u)
  if integrator.opts.adaptive
    # Error estimate: with the embedded 2nd-order method uhat = uprev + dt*w1[:,2],
    # the estimator is u - uhat = 2dt*(w2[:,4] - w1[:,4]). Forming the difference
    # directly (instead of constructing uhat) is more numerically stable.
    @views @. utilde = 2dt * (w2[:,4] - w1[:,4])
    calculate_residuals!(tmp, utilde, uprev, u, integrator.opts.abstol, integrator.opts.reltol,integrator.opts.internalnorm)
    integrator.EEst = integrator.opts.internalnorm(tmp)
  end

  # Update integrator state
  f(integrator.fsallast, u, p, t + dt)
  # integrator.k is automatically set due to aliasing
end

######################################################
# Multistep exponential integrators
function initialize!(integrator,cache::ETD2ConstantCache)
Expand Down
11 changes: 11 additions & 0 deletions test/linear_nonlinear_krylov_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -101,4 +101,15 @@ end
@test sol(1.0) ≈ exp_fun2.analytic(u0,nothing,1.0)
end
end

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Needs a convergence test

@testset "Adaptive exponential Rosenbrock" begin
  dt = 0.05
  abstol = 1e-4
  reltol = 1e-3
  sol_ref = solve(prob, Tsit5(); abstol=abstol, reltol=reltol)

  # NOTE(review): these compare two adaptively-controlled solutions at
  # rtol=reltol, but the controllers only bound *local* error — the global
  # error can exceed the tolerance by an order of magnitude even in a sane
  # integration. A convergence test and a step-count regression test
  # (solve at a fixed tolerance and bound `length(sol)`) would be more
  # robust indicators that the error estimator stays correct.
  sol = solve(prob, Exprb32(m=20); adaptive=true, abstol=abstol, reltol=reltol)
  @test isapprox(sol(1.0), sol_ref(1.0); rtol=reltol)
  sol = solve(prob_ip, Exprb32(m=20); adaptive=true, abstol=abstol, reltol=reltol)
  @test isapprox(sol(1.0), sol_ref(1.0); rtol=reltol)
end
end
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There should be a regression test. I would do some diagnostic printing to make sure things are right: check the error estimator against the analytical solution and it should be in the same ballpark, make sure it goes to zero sanely as the tolerances decrease, etc. Then when you think you have it right, make it solve the problem at some standard tolerance and put a test that its length cannot grow much. So if you measure it at 23 steps, put it at <26 or something like that. Differences between processors can change it by a step or two due to floating point differences, but not by a whole lot. A big indicator for when the error estimator is off is that the step count will grow, so that's a nice regression test for the future to indicate any time that the error estimate breaks.