7 changes: 6 additions & 1 deletion .travis.yml
@@ -12,7 +12,11 @@ addons:
- openmpi-bin
- libopenmpi-dev
notifications:
email: false
email:
on_success: never
on_failure: always
before_script:
- julia -e 'using Pkg; Pkg.add(PackageSpec(name="Gridap",rev="GridapDistributed"))'
after_success:
- julia -e 'using Pkg; Pkg.add("Coverage"); using Coverage; Codecov.submit(process_folder())'
jobs:
Expand All @@ -22,6 +26,7 @@ jobs:
julia: 1.4
script: julia --project=docs -e '
using Pkg;
Pkg.add(PackageSpec(name="Gridap",rev="GridapDistributed"));
Pkg.develop(PackageSpec(path=pwd()));
Pkg.instantiate();
include("docs/make.jl");'
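For reference, the branch pin added in before_script can be reproduced locally with a minimal sketch like the following; it assumes the Gridap package still has a GridapDistributed branch and that Julia is started from the root of this repository (package name, branch name, and the develop/instantiate steps are taken from the CI script above).

using Pkg
Pkg.add(PackageSpec(name="Gridap", rev="GridapDistributed"))  # same branch pin as in before_script
Pkg.develop(PackageSpec(path=pwd()))                          # develop this package, as in the docs job
Pkg.instantiate()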
1 change: 1 addition & 0 deletions Project.toml
@@ -9,6 +9,7 @@ MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

[compat]
Gridap = "0.10.0"
julia = "1"

[extras]
2 changes: 2 additions & 0 deletions src/DistributedData.jl
@@ -72,6 +72,8 @@ function get_distributed_data(object)
object
end

get_comm(a) = get_comm(get_distributed_data(a))

# Specializations

struct SequentialDistributedData{T} <: DistributedData{T}
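The new fallback get_comm(a) = get_comm(get_distributed_data(a)) means any object that exposes its underlying DistributedData automatically gets a communicator. A minimal sketch, where MyWrapper is a hypothetical user type (not part of the package) and assuming get_distributed_data and get_comm are reachable from GridapDistributed:

using GridapDistributed
import GridapDistributed: get_distributed_data

# Hypothetical wrapper type, used only for illustration.
struct MyWrapper{T}
  data::T  # expected to hold a DistributedData
end

# Exposing the underlying DistributedData is the only requirement ...
get_distributed_data(w::MyWrapper) = w.data

# ... the generic fallback then forwards the query:
# get_comm(w) == get_comm(w.data)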
23 changes: 2 additions & 21 deletions src/DistributedFEOperators.jl
@@ -1,23 +1,4 @@

struct DistributedAffineOperator{A,B}
matrix::A
vector::B
end

Gridap.Algebra.get_matrix(op::DistributedAffineOperator) = op.matrix

Gridap.Algebra.get_vector(op::DistributedAffineOperator) = op.vector

struct DistributedAffineFEOperator
trial::DistributedFESpace
test::DistributedFESpace
op::DistributedAffineOperator
end

Gridap.Algebra.get_matrix(op::DistributedAffineFEOperator) = get_matrix(op.op)

Gridap.Algebra.get_vector(op::DistributedAffineFEOperator) = get_vector(op.op)

function Gridap.FESpaces.AffineFEOperator(dassem::DistributedAssembler, dterms)

dvecdata = DistributedData(dassem,dterms) do part, assem, terms
@@ -41,8 +22,8 @@ function Gridap.FESpaces.AffineFEOperator(dassem::DistributedAssembler, dterms)
trial = dassem.trial
test = dassem.test

op = DistributedAffineOperator(A,b)
DistributedAffineFEOperator(trial,test,op)
op = AffineOperator(A,b)
AffineFEOperator(trial,test,op)

end
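
With this change the distributed assembler returns a plain Gridap AffineFEOperator instead of the removed wrapper types, so the standard accessors and solvers apply directly. A minimal usage sketch, assuming assem and terms are built as in the Poisson test drivers further down:

op = AffineFEOperator(assem, terms)  # assem::DistributedAssembler
A  = get_matrix(op)                  # standard Gridap accessors, no custom wrapper types
b  = get_vector(op)
uh = solve(op)                       # generic FE solve path, as used in the updated tests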

62 changes: 51 additions & 11 deletions src/DistributedFESpaces.jl
@@ -1,4 +1,5 @@
struct DistributedFESpace
struct DistributedFESpace{V} <: FESpace
vector_type::V
spaces::DistributedData{<:FESpace}
gids::DistributedIndexSet
end
@@ -12,25 +13,64 @@ function get_distributed_data(dspace::DistributedFESpace)
end
end

function Gridap.TrialFESpace(V::DistributedFESpace,args...)
spaces = DistributedData(V.spaces) do part, space
TrialFESpace(space,args...)
end
DistributedFESpace(spaces,V.gids)
# Minimal FE interface

function Gridap.FESpaces.num_free_dofs(f::DistributedFESpace)
f.gids.ngids
end

function Gridap.FESpaces.FEFunction(dV::DistributedFESpace,x)
dfree_vals = x[dV.gids]
DistributedData(dV.spaces,dfree_vals) do part, V, free_vals
funs = DistributedData(dV.spaces,dfree_vals) do part, V, free_vals
FEFunction(V,free_vals)
end
DistributedFEFunction(funs,x,dV)
end

function Gridap.FESpaces.EvaluationFunction(dV::DistributedFESpace,x)
dfree_vals = x[dV.gids]
funs = DistributedData(dV.spaces,dfree_vals) do part, V, free_vals
EvaluationFunction(V,free_vals)
end
DistributedFEFunction(funs,x,dV)
end

function Gridap.FESpaces.zero_free_values(f::DistributedFESpace)
allocate_vector(f.vector_type,f.gids)
end

# FE Function

struct DistributedFEFunction
funs::DistributedData
vals::AbstractVector
space::DistributedFESpace
end

Gridap.FESpaces.FEFunctionStyle(::Type{DistributedFEFunction}) = Val{true}()

get_distributed_data(u::DistributedFEFunction) = u.funs

Gridap.FESpaces.get_free_values(a::DistributedFEFunction) = a.vals

Gridap.FESpaces.get_fe_space(a::DistributedFEFunction) = a.space

# Constructors

function Gridap.TrialFESpace(V::DistributedFESpace,args...)
spaces = DistributedData(V.spaces) do part, space
TrialFESpace(space,args...)
end
DistributedFESpace(V.vector_type,spaces,V.gids)
end

function Gridap.FESpace(comm::Communicator;model::DistributedDiscreteModel,kwargs...)
DistributedFESpace(comm;model=model,kwargs...)
function Gridap.FESpace(::Type{V};model::DistributedDiscreteModel,kwargs...) where V
DistributedFESpace(V;model=model,kwargs...)
end

function DistributedFESpace(comm::Communicator;model::DistributedDiscreteModel,kwargs...)
function DistributedFESpace(::Type{V}; model::DistributedDiscreteModel,kwargs...) where V

comm = get_comm(model)

nsubdoms = num_parts(model.models)

@@ -121,7 +161,7 @@ function DistributedFESpace(comm::Communicator;model::DistributedDiscreteModel,k

gids = DistributedIndexSet(init_free_gids,comm,ngids, part_to_lid_to_gid,part_to_lid_to_owner,part_to_ngids)

DistributedFESpace(spaces,gids)
DistributedFESpace(V,spaces,gids)
end

function _update_lid_to_gid!(lid_to_gid,cell_to_lids,cell_to_gids,cell_to_owner,lid_to_owner)
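Taken together with the test drivers below, the new constructor and the minimal FE interface can be exercised as in this sketch (sequential backend; keyword arguments follow the tests, and the comments only restate what the methods above do):

using Gridap
using Gridap.FESpaces
using GridapDistributed

subdomains = (2,2)
comm = SequentialCommunicator(subdomains)
model = CartesianDiscreteModel(comm, subdomains, (0,1,0,1), (4,4))

vector_type = Vector{Float64}
V = FESpace(vector_type, model=model, valuetype=Float64, reffe=:Lagrangian, order=1)

n  = num_free_dofs(V)      # equals V.gids.ngids
x  = zero_free_values(V)   # allocated from vector_type and the gids
uh = FEFunction(V, x)      # a DistributedFEFunction holding one FEFunction per part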
17 changes: 2 additions & 15 deletions src/DistributedVectors.jl
@@ -108,22 +108,11 @@

# Assembly related

function Gridap.FESpaces.allocate_vector(::Type{V},gids::DistributedIndexSet) where V <: AbstractVector
function Gridap.Algebra.allocate_vector(::Type{V},gids::DistributedIndexSet) where V <: AbstractVector
ngids = num_gids(gids)
allocate_vector(V,ngids)
end

#TODO move to gridap
function Gridap.FESpaces.allocate_vector(::Type{<:AbstractVector{T}},n::Integer) where T
zeros(T,n)
end

#TODO move to Gridap
function Gridap.Algebra.add_entry!(a,v,i,combine=+)
ai = a[i]
a[i] = combine(ai,v)
end

struct SequentialIJV{A,B}
dIJV::A
gIJV::B
@@ -153,10 +142,8 @@ function Gridap.Algebra.finalize_coo!(
finalize_coo!(M,I,J,V,num_gids(m),num_gids(n))
end

#TODO this should be defined for any matrix. Fix in gridap
using SparseArrays
function Gridap.Algebra.sparse_from_coo(
::Type{M},IJV::SequentialIJV,m::DistributedIndexSet,n::DistributedIndexSet) where M <: SparseMatrixCSC
::Type{M},IJV::SequentialIJV,m::DistributedIndexSet,n::DistributedIndexSet) where M
I,J,V = IJV.gIJV
sparse_from_coo(M,I,J,V,num_gids(m),num_gids(n))
end
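A small sketch of the allocation path after the move to the Gridap.Algebra namespace, assuming gids is a DistributedIndexSet (for instance V.gids from a DistributedFESpace built as in the tests):

x = Gridap.Algebra.allocate_vector(Vector{Float64}, gids)
# length(x) == num_gids(gids): a plain serial Vector{Float64} covering all global ids,
# matching the note in the tests that vectors are serial while the assembly is distributed.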
9 changes: 5 additions & 4 deletions test/DistributedAssemblersTests.jl
@@ -6,22 +6,23 @@ using GridapDistributed
using Test
using SparseArrays

T = Float64
vector_type = Vector{T}
matrix_type = SparseMatrixCSC{T,Int}

subdomains = (2,2)
comm = SequentialCommunicator(subdomains)

domain = (0,1,0,1)
cells = (4,4)
model = CartesianDiscreteModel(comm,subdomains,domain,cells)

V = FESpace(comm,model=model,valuetype=Float64,reffe=:Lagrangian,order=1)
V = FESpace(vector_type,model=model,valuetype=Float64,reffe=:Lagrangian,order=1)

U = TrialFESpace(V)

strategy = RowsComputedLocally(V)

T = Float64
vector_type = Vector{T}
matrix_type = SparseMatrixCSC{T,Int}

assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, strategy)

7 changes: 4 additions & 3 deletions test/DistributedFESpacesTests.jl
@@ -6,15 +6,16 @@ using Gridap.FESpaces
using Test

subdomains = (2,2)
comm = SequentialCommunicator(subdomains)

domain = (0,1,0,1)
cells = (4,4)
comm = SequentialCommunicator(subdomains)
model = CartesianDiscreteModel(comm,subdomains,domain,cells)

nsubdoms = prod(subdomains)

V = FESpace(comm,model=model,valuetype=Float64,reffe=:Lagrangian,order=1)
vector_type = Vector{Float64}

V = FESpace(vector_type,model=model,valuetype=Float64,reffe=:Lagrangian,order=1)

do_on_parts(V, model) do part,(space,gids), (model,_)

23 changes: 10 additions & 13 deletions test/DistributedPoissonDGTests.jl
@@ -9,6 +9,13 @@ using GridapDistributed: SparseMatrixAssemblerX
using GridapDistributed: RowsComputedLocally
using SparseArrays

# Select matrix and vector types for discrete problem
# Note that here we use serial vectors and matrices
# but the assembly is distributed
T = Float64
vector_type = Vector{T}
matrix_type = SparseMatrixCSC{T,Int}

# Manufactured solution
u(x) = x[1]*(x[1]-1)*x[2]*(x[2]-1)
f(x) = - Δ(u)(x)
@@ -25,7 +32,7 @@ const h = (domain[2]-domain[1]) / cells[1]
# FE Spaces
order = 2
V = FESpace(
comm, valuetype=Float64, reffe=:Lagrangian, order=order,
vector_type, valuetype=Float64, reffe=:Lagrangian, order=order,
model=model, conformity=:L2)

U = TrialFESpace(V)
@@ -61,25 +68,16 @@ terms = DistributedData(model) do part, (model,gids)
(t_Ω,t_Γ,t_Γd)
end

# Select matrix and vector types for discrete problem
# Note that here we use serial vectors and matrices
# but the assembly is distributed
T = Float64
vector_type = Vector{T}
matrix_type = SparseMatrixCSC{T,Int}

# Choose parallel assembly strategy
strategy = RowsComputedLocally(V)

# Assembly
assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, strategy)
op = AffineFEOperator(assem,terms)
A = get_matrix(op)
b = get_vector(op)

# FE solution
x = A \ b
uh = FEFunction(U,x)
op = AffineFEOperator(assem,terms)
uh = solve(op)

# Error norms and print solution
sums = DistributedData(model,uh) do part, (model,gids), uh
@@ -112,5 +110,4 @@ tol = 1.0e-9
@test e_l2 < tol
@test e_h1 < tol


end # module
25 changes: 11 additions & 14 deletions test/DistributedPoissonTests.jl
@@ -9,6 +9,13 @@ using GridapDistributed: SparseMatrixAssemblerX
using GridapDistributed: RowsComputedLocally
using SparseArrays

# Select matrix and vector types for discrete problem
# Note that here we use serial vectors and matrices
# but the assembly is distributed
T = Float64
vector_type = Vector{T}
matrix_type = SparseMatrixCSC{T,Int}

# Manufactured solution
u(x) = x[1] + x[2]
#f(x) = - Δ(u)(x)
@@ -24,7 +31,7 @@ model = CartesianDiscreteModel(comm,subdomains,domain,cells)
# FE Spaces
order = 1
V = FESpace(
comm, valuetype=Float64, reffe=:Lagrangian, order=1,
vector_type, valuetype=Float64, reffe=:Lagrangian, order=1,
model=model, conformity=:H1)# TODO, dirichlet_tags="boundary") for the moment we solve a l2 problem

U = TrialFESpace(V,u)
@@ -45,25 +52,15 @@ terms = DistributedData(model) do part, (model,gids)
(t1,)
end

# Select matrix and vector types for discrete problem
# Note that here we use serial vectors and matrices
# but the assembly is distributed
T = Float64
vector_type = Vector{T}
matrix_type = SparseMatrixCSC{T,Int}

# Choose parallel assembly strategy
strategy = RowsComputedLocally(V)

# Assembly
# Assembler
assem = SparseMatrixAssembler(matrix_type, vector_type, U, V, strategy)
op = AffineFEOperator(assem,terms)
A = get_matrix(op)
b = get_vector(op)

# FE solution
x = A \ b
uh = FEFunction(U,x)
op = AffineFEOperator(assem,terms)
uh = solve(op)

# Error norms and print solution
sums = DistributedData(model,uh) do part, (model,gids), uh