1 change: 1 addition & 0 deletions Project.toml
@@ -6,6 +6,7 @@ version = "0.1.0"
 [deps]
 Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e"
 MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

 [compat]
 julia = "1"
4 changes: 3 additions & 1 deletion src/CartesianDiscreteModels.jl
@@ -9,19 +9,21 @@ function Gridap.CartesianDiscreteModel(
   comm::Communicator,subdomains::Tuple,gdesc::CartesianDescriptor{D,T,F}) where {D,T,F}

   nsubdoms = prod(subdomains)
+  ngcells = prod(Tuple(gdesc.partition))

   S = CartesianDiscreteModel{D,T,F}

   models = ScatteredVector{S}(comm,nsubdoms) do (isubdom)

     ldesc = local_cartesian_descriptor(gdesc,subdomains,isubdom)
     #TODO face labeling has wrong ids
     CartesianDiscreteModel(ldesc)
   end

   gids = GhostedVector{Int}(comm,nsubdoms) do (isubdom)

     lid_to_gid, lid_to_owner = local_cartesian_gids(gdesc,subdomains,isubdom)
-    GhostedVectorPart(lid_to_gid,lid_to_gid,lid_to_owner)
+    GhostedVectorPart(ngcells,lid_to_gid,lid_to_gid,lid_to_owner)
   end

   DistributedDiscreteModel(models,gids)
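
A quick usage sketch of the extended constructor may help reviewers. The `CartesianDescriptor` call and the driver values below are assumptions for illustration, not part of this PR:

using Gridap

# Hypothetical driver: a 2D model with 8x8 global cells split into 2x2 subdomains.
gdesc = CartesianDescriptor((0,1,0,1),(8,8)) # assumed Gridap constructor signature
comm = SequentialCommunicator()              # sequential backend from this package
dmodel = CartesianDiscreteModel(comm,(2,2),gdesc)
# Each part now holds a local model plus a GhostedVectorPart of global cell ids
# that records ngcells, the total number of global cells.
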
11 changes: 6 additions & 5 deletions src/Communicators.jl
@@ -3,7 +3,7 @@
 # The abstract Communicator should probably be more abstract.
 abstract type Communicator end

-function do_on_parts(::Communicator,task::Function,args...)
+function do_on_parts(task::Function,::Communicator,args...)
   @abstractmethod
 end

@@ -12,15 +12,15 @@ function i_am_master(::Communicator)
 end

 function do_on_parts(task::Function,args...)
-  comm = get_comm(first(args))
+  comm = get_comm(get_distributed_data(first(args)))
   do_on_parts(task,comm,args...)
 end

 struct SequentialCommunicator <: Communicator end

 function do_on_parts(task::Function,::SequentialCommunicator,args...)
-  for part in 1:length(first(args).parts)
-    largs = map(a->a.parts[part],args)
+  for part in 1:num_parts(get_distributed_data(first(args)))
+    largs = map(a->get_distributed_data(a).parts[part],args)
     task(part,largs...)
   end
 end

@@ -38,6 +38,7 @@ struct MPICommunicator <: Communicator
 end

 function MPICommunicator()
+  # TODO copy the communicator
   MPICommunicator(MPI.COMM_WORLD)
 end

@@ -47,7 +48,7 @@

 function do_on_parts(task::Function,comm::MPICommunicator,args...)
   part = get_part(comm)
-  largs = map(a->a.part,args)
+  largs = map(a->get_distributed_data(a).part,args)
   task(part,largs...)
 end
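
With this reordering, `task` is always the first positional argument, so the generic `do_on_parts(task,args...)` fallback can recover the communicator from any argument that implements `get_distributed_data`. A minimal sketch of the resulting calling convention, mirroring the writevtk code below:

# Runs once per part; the tuple is destructured into the local pieces.
do_on_parts(dmodel) do part, (model,gids)
  println("part $part stores $(length(gids.lid_to_owner)) local (owned+ghost) cells")
end
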
20 changes: 13 additions & 7 deletions src/DistributedDiscreteModels.jl
@@ -1,22 +1,28 @@
 # @santiagobadia : In this model, gluing is via global VEF dofs, I guess.
 # Do we want something else here? Do we need e.g. ghost cells too?
+# I agree that with global vef IDs we have a well-defined distributed model,
+# so these cells can be created when computing the triangulation and grid,
+# because they will certainly be needed for e.g. dG formulations, etc...
 struct DistributedDiscreteModel
   models::ScatteredVector{<:DiscreteModel}
   gids::GhostedVector{Int}
 end

 function Gridap.writevtk(model::DistributedDiscreteModel,filebase::String)

-  function task(part,model,gids)
+  do_on_parts(model) do part, (model, gids)

     cdata = ["gids"=>gids.lid_to_gid,"owner"=>gids.lid_to_owner]
     filebase_part = filebase*"_$(part)"
     trian = Triangulation(model)
     writevtk(trian,filebase_part,celldata=cdata)
   end

-  do_on_parts(task,model.models,model.gids)
 end
+
+function get_distributed_data(dmodel::DistributedDiscreteModel)
+  models = dmodel.models
+  gids = dmodel.gids
+  comm = get_comm(models)
+  nparts = num_parts(models)
+
+  T = Tuple{get_part_type(models),get_part_type(gids)}
+  ScatteredVector{T}(comm,nparts,models,gids) do part, model, lgids
+    model, lgids
+  end
+end
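
The function above is the pattern behind the new `get_distributed_data` protocol: zip the parts of several distributed containers into one `ScatteredVector` of tuples, so `do_on_parts` tasks receive the matching local pieces together. A sketch for a user-defined wrapper (`MyDistributedPair` is a hypothetical type, not in this PR):

struct MyDistributedPair
  a::ScatteredVector
  b::ScatteredVector
end

function get_distributed_data(x::MyDistributedPair)
  comm = get_comm(x.a)
  nparts = num_parts(x.a)
  T = Tuple{get_part_type(x.a),get_part_type(x.b)}
  # Zip the local parts so tasks receive (a_part,b_part) tuples.
  ScatteredVector{T}(comm,nparts,x.a,x.b) do part, ap, bp
    ap, bp
  end
end
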
35 changes: 25 additions & 10 deletions src/DistributedFESpaces.jl
@@ -1,11 +1,18 @@
 # @santiagobadia : I think that the meshes in the vector of local FE Spaces
 # require elaboration. They cannot be just the portion of the local mesh, since
 # one probably wants to integrate face terms, compute error estimates, etc...
 # Eventually, we should provide info about number_ghost_layers in the
 # constructor
 struct DistributedFESpace
   spaces::ScatteredVector{<:FESpace}
-  free_gids::GhostedVector{Int}
+  gids::GhostedVector{Int}
 end

+function get_distributed_data(dspace::DistributedFESpace)
+  spaces = dspace.spaces
+  gids = dspace.gids
+  comm = get_comm(spaces)
+  nparts = num_parts(spaces)
+
+  T = Tuple{get_part_type(spaces),get_part_type(gids)}
+  ScatteredVector{T}(comm,nparts,spaces,gids) do part, space, lgids
+    space, lgids
+  end
+end
+
 function Gridap.FESpace(comm::Communicator;model::DistributedDiscreteModel,kwargs...)
@@ -41,9 +48,15 @@ function DistributedFESpace(comm::Communicator;model::DistributedDiscreteModel,kwargs...)
   part_to_num_oids = gather(a)

   if i_am_master(comm)
+    ngids = sum(part_to_num_oids)
+    ngids_array = fill(ngids,nsubdoms)
     _fill_offsets!(part_to_num_oids)
+  else
+    ngids_array = Int[]
   end

+  part_to_ngids = scatter(comm,ngids_array)
+
   offsets = scatter(comm,part_to_num_oids)

   function init_cell_to_owners(part,lspace,lid_to_owner)
@@ -88,13 +101,13 @@ function DistributedFESpace(comm::Communicator;model::DistributedDiscreteModel,kwargs...)

   do_on_parts(update_lid_to_owner,part_to_lid_to_gid,spaces,part_to_cell_to_gids)

-  function init_free_gids(part,lid_to_gid,lid_to_owner)
-    GhostedVectorPart(lid_to_gid,lid_to_gid,lid_to_owner)
+  function init_free_gids(part,lid_to_gid,lid_to_owner,ngids)
+    GhostedVectorPart(ngids,lid_to_gid,lid_to_gid,lid_to_owner)
   end

-  free_gids = GhostedVector{Int}(init_free_gids,comm,nsubdoms,part_to_lid_to_gid,part_to_lid_to_owner)
+  gids = GhostedVector{Int}(init_free_gids,comm,nsubdoms,part_to_lid_to_gid,part_to_lid_to_owner,part_to_ngids)

-  DistributedFESpace(spaces,free_gids)
+  DistributedFESpace(spaces,gids)
 end

 function _update_lid_to_gid!(lid_to_gid,cell_to_lids,cell_to_gids,cell_to_owner,lid_to_owner)
@@ -160,3 +173,5 @@ function _fill_max_part_around!(lid_to_owner,cell_to_owner,cell_to_lids)
     end
   end
 end
+
+
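
For reference, the master computes the global number of free dofs (`ngids`) and scatters it alongside the per-part offsets. `_fill_offsets!` is not shown in this diff; a plausible reading, consistent with how the result is scattered as `offsets`, is an in-place exclusive prefix sum over the owned-dof counts:

# Assumed behavior of _fill_offsets! (the function body is not in this diff):
# turn per-part counts of owned dofs into exclusive offsets, in place.
function _fill_offsets_sketch!(part_to_num_oids)
  offset = 0
  for i in eachindex(part_to_num_oids)
    n = part_to_num_oids[i]
    part_to_num_oids[i] = offset # global ids owned by part i start at offset+1
    offset += n
  end
end

counts = [3,2,4]
_fill_offsets_sketch!(counts) # counts == [0,3,5]; ngids == 9
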
43 changes: 43 additions & 0 deletions src/DistributedTriangulations.jl
@@ -0,0 +1,43 @@
+struct DistributedTriangulation
+  trians::ScatteredVector{<:Triangulation}
+end
+
+function get_distributed_data(dtrian::DistributedTriangulation)
+  dtrian.trians
+end
+
+function Gridap.Triangulation(dmodel::DistributedDiscreteModel,args...)
+  comm = get_comm(dmodel.models)
+  nparts = num_parts(dmodel.models)
+  trians = ScatteredVector{Triangulation}(comm,nparts,dmodel.models) do part, model
+    Triangulation(model,args...)
+  end
+  DistributedTriangulation(trians)
+end
+
+function Gridap.BoundaryTriangulation(dmodel::DistributedDiscreteModel,args...)
+  comm = get_comm(dmodel.models)
+  nparts = num_parts(dmodel.models)
+  trians = ScatteredVector{Triangulation}(comm,nparts,dmodel.models) do part, model
+    BoundaryTriangulation(model,args...)
+  end
+  DistributedTriangulation(trians)
+end
+
+function Gridap.SkeletonTriangulation(dmodel::DistributedDiscreteModel,args...)
+  comm = get_comm(dmodel.models)
+  nparts = num_parts(dmodel.models)
+  trians = ScatteredVector{Triangulation}(comm,nparts,dmodel.models) do part, model
+    SkeletonTriangulation(model,args...)
+  end
+  DistributedTriangulation(trians)
+end
+
+function Gridap.writevtk(dtrian::DistributedTriangulation,filebase::String)
+
+  do_on_parts(dtrian) do part, trian
+    filebase_part = filebase*"_$(part)"
+    writevtk(trian,filebase_part)
+  end
+
+end
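
The three constructors are identical up to the wrapped Gridap function; a loop over the constructor names (e.g. via metaprogramming) could remove the duplication. A usage sketch, with illustrative file names:

trian = Triangulation(dmodel)          # one local Triangulation per part
btrian = BoundaryTriangulation(dmodel) # extra args... are forwarded to Gridap
writevtk(trian,"trian")                # writes one file per part: trian_1, trian_2, ...
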
26 changes: 22 additions & 4 deletions src/GhostedVectors.jl
@@ -1,4 +1,10 @@
-abstract type GhostedVector{T} end
+abstract type GhostedVector{T} <: DistributedData end
+
+Base.eltype(::Type{<:GhostedVector{T}}) where T = T
+Base.eltype(::GhostedVector{T}) where T = T
+
+get_part_type(::Type{<:GhostedVector{T}}) where T = GhostedVectorPart{T}
+get_part_type(::GhostedVector{T}) where T = GhostedVectorPart{T}

 # @santiagobadia : Think about the name... not sure the "ghosted" name makes
 # much sense in this context. GhostVector or something better (names in PETSc?)
@@ -12,13 +18,15 @@
 # a lazy way? Does it make sense?

 struct GhostedVectorPart{T}
+  ngids::Int
   lid_to_item::Vector{T}
   lid_to_gid::Vector{Int}
   lid_to_owner::Vector{Int}
   gid_to_lid::Dict{Int,Int32}
 end

 function GhostedVectorPart{T}(
+  ngids::Int,
   lid_to_item::Vector,
   lid_to_gid::Vector{Int},
   lid_to_owner::Vector{Int}) where T
@@ -28,18 +36,21 @@ function GhostedVectorPart{T}(
     gid_to_lid[gid] = lid
   end
   GhostedVectorPart{T}(
+    ngids,
     lid_to_item,
     lid_to_gid,
     lid_to_owner,
     gid_to_lid)
 end

 function GhostedVectorPart(
+  ngids::Int,
   lid_to_item::Vector{T},
   lid_to_gid::Vector{Int},
   lid_to_owner::Vector{Int}) where T

   GhostedVectorPart{T}(
+    ngids,
     lid_to_item,
     lid_to_gid,
     lid_to_owner)
@@ -69,10 +80,12 @@ end

 get_comm(a::SequentialGhostedVector) = SequentialCommunicator()

+num_parts(a::SequentialGhostedVector) = length(a.parts)
+
 function GhostedVector{T}(
   initializer::Function,::SequentialCommunicator,nparts::Integer,args...) where T

-  parts = [ initializer(i,map(a->a.parts[i],args)...) for i in 1:nparts ]
+  parts = [ initializer(i,map(a->get_distributed_data(a).parts[i],args)...) for i in 1:nparts ]
   SequentialGhostedVector{T}(parts)
 end

@@ -82,7 +95,8 @@ function GhostedVector{T}(
   nparts = length(a.parts)
   parts = [
     GhostedVectorPart(
-      initializer(i,map(a->a.parts[i],args)...),
+      a.parts[i].ngids,
+      initializer(i,map(a->get_distributed_data(a).parts[i],args)...),
       a.parts[i].lid_to_gid,
       a.parts[i].lid_to_owner,
       a.parts[i].gid_to_lid)
@@ -112,9 +126,13 @@ struct MPIGhostedVector{T} <: GhostedVector{T}
   comm::MPICommunicator
 end

+get_comm(a::MPIGhostedVector) = a.comm
+
+num_parts(a::MPIGhostedVector) = num_parts(a.comm)
+
 function GhostedVector{T}(initializer::Function,comm::MPICommunicator,nparts::Integer,args...) where T
   @assert nparts == num_parts(comm)
-  largs = map(a->a.part,args)
+  largs = map(a->get_distributed_data(a).part,args)
   i = get_part(comm)
   part = initializer(i,largs...)
   MPIGhostedVector{T}(part,comm)
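
With the new leading `ngids` field, every part records the global length of the vector. A small sketch of building one part by hand, using the outer constructor from this diff (the values are illustrative):

# Part 1 owns global ids 1:3 and has one ghost entry owned by part 2.
ngids = 4
lid_to_gid = [1,2,3,4]
lid_to_owner = [1,1,1,2]
lid_to_item = lid_to_gid # here the stored items are the gids themselves
part = GhostedVectorPart(ngids,lid_to_item,lid_to_gid,lid_to_owner)
# The inner constructor fills the gid_to_lid Dict automatically.
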