OrdinaryDiffEq.jl works with distributed arrays because it only relies on the AbstractArray interface, but as mentioned in the StackOverflow response, the real issue is which library to pair it with. The most promising current contender is PartitionedArrays.jl, but the demo is currently stuck: fverdugo/PartitionedArrays.jl#52. We need to find which array type can work, and how to match that to sparse linear solvers (Elemental.jl). Maybe DistributedArrays.jl is fine?
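As a first experiment, here is a minimal sketch of the DistributedArrays.jl route (the decay test problem and worker count are mine, not from the issue; whether DArray's AbstractArray/broadcast support is enough for the solver to run end-to-end is exactly what needs testing):
using Distributed
addprocs(4)
@everywhere using DistributedArrays, OrdinaryDiffEq
u0 = distribute(ones(1000))        # DArray split across the 4 workers
decay(u, p, t) = p .* u            # toy out-of-place linear problem; broadcast returns a DArray
prob = ODEProblem(decay, u0, (0.0, 1.0), -1.0)
sol = solve(prob, Tsit5(), save_everystep = false)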
The demo-ish code below is a good starting point that can be modified to test DistributedArrays.jl and all of that:
using OrdinaryDiffEq, LinearAlgebra, SparseArrays, BenchmarkTools, LinearSolve
const N = 32
const xyd_brusselator = range(0, stop = 1, length = N)
brusselator_f(x, y, t) = (((x - 0.3)^2 + (y - 0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.0
limit(a, N) = a == N + 1 ? 1 : a == 0 ? N : a  # periodic wrap-around: limit(0, N) == N, limit(N + 1, N) == 1
kernel_u! = let N = N, xyd = xyd_brusselator, dx = step(xyd_brusselator)
    @inline function (du, u, A, B, α, II, I, t)
        i, j = Tuple(I)
        x = xyd[I[1]]
        y = xyd[I[2]]
        ip1 = limit(i + 1, N); im1 = limit(i - 1, N)
        jp1 = limit(j + 1, N); jm1 = limit(j - 1, N)
        du[II[i, j, 1]] = α * (u[II[im1, j, 1]] + u[II[ip1, j, 1]] + u[II[i, jp1, 1]] + u[II[i, jm1, 1]] - 4u[II[i, j, 1]]) +
                          B + u[II[i, j, 1]]^2 * u[II[i, j, 2]] - (A + 1) * u[II[i, j, 1]] + brusselator_f(x, y, t)
    end
end
kernel_v! = let N = N, xyd = xyd_brusselator, dx = step(xyd_brusselator)
    @inline function (du, u, A, B, α, II, I, t)
        i, j = Tuple(I)
        ip1 = limit(i + 1, N)
        im1 = limit(i - 1, N)
        jp1 = limit(j + 1, N)
        jm1 = limit(j - 1, N)
        du[II[i, j, 2]] = α * (u[II[im1, j, 2]] + u[II[ip1, j, 2]] + u[II[i, jp1, 2]] + u[II[i, jm1, 2]] - 4u[II[i, j, 2]]) +
                          A * u[II[i, j, 1]] - u[II[i, j, 1]]^2 * u[II[i, j, 2]]
    end
end
brusselator_2d = let N = N, xyd = xyd_brusselator, dx = step(xyd_brusselator)
    function (du, u, p, t)
        @inbounds begin
            ii1 = N^2
            ii2 = ii1 + N^2
            ii3 = ii2 + 2(N^2)
            A = p[1]
            B = p[2]
            α = p[3] / dx^2
            II = LinearIndices((N, N, 2))
            # Broadcasting the kernels over CartesianIndices applies the stencil at
            # every grid point; Ref(...) keeps du, u, and II out of the broadcast.
            kernel_u!.(Ref(du), Ref(u), A, B, α, Ref(II), CartesianIndices((N, N)), t)
            kernel_v!.(Ref(du), Ref(u), A, B, α, Ref(II), CartesianIndices((N, N)), t)
            return nothing
        end
    end
end
p = (3.4, 1.0, 10.0, step(xyd_brusselator))
function init_brusselator_2d(xyd)
    N = length(xyd)
    u = zeros(N, N, 2)
    for I in CartesianIndices((N, N))
        x = xyd[I[1]]
        y = xyd[I[2]]
        u[I, 1] = 22 * (y * (1 - y))^(3 / 2)
        u[I, 2] = 27 * (x * (1 - x))^(3 / 2)
    end
    u
end
u0 = init_brusselator_2d(xyd_brusselator)
prob_ode_brusselator_2d = ODEProblem(brusselator_2d, u0, (0.0, 11.5), p)
du = similar(u0)
brusselator_2d(du, u0, p, 0.0)
du[34] # 802.9807693762164
du[1058] # 985.3120721709204
du[2000] # -403.5817880634729
du[end] # 1431.1460373522068
du[521] # -323.1677459142322
du2 = similar(u0)
brusselator_2d(du2, u0, p, 1.3)
du2[34] # 802.9807693762164
du2[1058] # 985.3120721709204
du2[2000] # -403.5817880634729
du2[end] # 1431.1460373522068
du2[521] # -318.1677459142322
using Symbolics, PartitionedArrays, SparseDiffTools
du0 = copy(u0)
jac_sparsity = float.(Symbolics.jacobian_sparsity((du, u) -> brusselator_2d(du, u, p, 0.0), du0, u0))
colorvec = matrix_colors(jac_sparsity)
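# Optional sanity check (assumed usage, not in the original issue): fill the colored
# sparse Jacobian in-place with SparseDiffTools to confirm pattern and coloring agree.
J = copy(jac_sparsity)
forwarddiff_color_jacobian!(J, (du, u) -> brusselator_2d(du, u, p, 0.0), u0;
    colorvec = colorvec, sparsity = jac_sparsity)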
# From https://github.com/fverdugo/PartitionedArrays.jl/blob/v0.2.8/test/test_fdm.jl#L93
# A = PSparseMatrix(I,J,V,rows,cols;ids=:local)
II, J, V = findnz(jac_sparsity)
jac_sparsity_distributed = PartitionedArrays.PSparseMatrix(II, J, V, jac_sparsity.rowval, jac_sparsity.colptr)
# MethodError: no method matching PSparseMatrix(::Vector{Int64}, ::Vector{Int64}, ::Vector{Float64}, ::Vector{Int64}, ::Vector{Int64})
f = ODEFunction(brusselator_2d; jac_prototype = jac_sparsity, colorvec = colorvec)
parf = ODEFunction(brusselator_2d; jac_prototype = jac_sparsity_distributed, colorvec = colorvec)
prob_ode_brusselator_2d = ODEProblem(brusselator_2d, u0, (0.0, 11.5), p, tstops = [1.1])
prob_ode_brusselator_2d_sparse = ODEProblem(f, u0, (0.0, 11.5), p, tstops = [1.1])
nparts = 4
pu0 = PartitionedArrays.PVector(u0, nparts) # MethodError: no method matching PVector(::Array{Float64, 3}, ::Int64)
prob_ode_brusselator_2d_parallel = ODEProblem(brusselator_2d, pu0, (0.0, 11.5), p, tstops = [1.1])
prob_ode_brusselator_2d_parallelsparse = ODEProblem(parf, pu0, (0.0, 11.5), p, tstops = [1.1])
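Both MethodErrors point the same way: the v0.2 constructors want partition objects rather than raw vectors. Here is a hedged, untested sketch of what the construction might look like, following the linked test_fdm.jl (SequentialBackend, get_part_ids, and PRange are my reading of the v0.2.8 API and may not match it exactly):
backend = SequentialBackend()              # or MPIBackend() under MPI
parts = get_part_ids(backend, nparts)
rows = PRange(parts, length(u0))           # uniform partition of the N*N*2 unknowns
cols = PRange(parts, length(u0))
pu0 = PVector{Float64}(undef, rows)        # then copy vec(u0) into the local parts
jac_sparsity_distributed = PSparseMatrix(II, J, V, rows, cols; ids = :global)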
With solver codes:
@time solve(prob_ode_brusselator_2d_parallel, Rosenbrock23(), save_everystep = false);
@time solve(prob_ode_brusselator_2d_parallelsparse, Rosenbrock23(), save_everystep = false);
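If those run, the next step is a preconditioned Krylov method: the precs hook below rebuilds an algebraic multigrid preconditioner whenever the W matrix is refactorized, and reuses the previous one otherwise.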
using AlgebraicMultigrid
function algebraicmultigrid(W, du, u, p, t, newW, Plprev, Prprev, solverdata)
    if newW === nothing || newW
        Pl = aspreconditioner(ruge_stuben(convert(AbstractMatrix, W)))
    else
        Pl = Plprev
    end
    Pl, nothing
end
# Required due to a bug in Krylov.jl: https://github.com/JuliaSmoothOptimizers/Krylov.jl/pull/477
Base.eltype(::AlgebraicMultigrid.Preconditioner) = Float64
@time solve(prob_ode_brusselator_2d_parallelsparse, Rosenbrock23(linsolve = KrylovJL_GMRES()), save_everystep = false);
@time solve(prob_ode_brusselator_2d_parallelsparse, Rosenbrock23(linsolve = KrylovJL_GMRES(), precs = algebraicmultigrid, concrete_jac = true), save_everystep = false);
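Note that concrete_jac = true is needed so OrdinaryDiffEq actually materializes the sparse Jacobian; with a Krylov linsolve it otherwise stays matrix-free, leaving the AMG nothing to coarsen. For a serial baseline against the same solver stack (my addition, using the prob_ode_brusselator_2d_sparse defined above):
@time solve(prob_ode_brusselator_2d_sparse, Rosenbrock23(linsolve = KrylovJL_GMRES(), precs = algebraicmultigrid, concrete_jac = true), save_everystep = false);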