5 changes: 4 additions & 1 deletion Project.toml
@@ -18,25 +18,28 @@ Tensors = "48a634ad-e948-5137-8d70-aa71f2a747f4"
 
 [weakdeps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
+AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 Exodus = "f57ae99e-f805-4780-bdca-96e224be1e5a"
 
 [extensions]
 FiniteElementContainersAdaptExt = "Adapt"
+FiniteElementContainersAMDGPUExt = ["Adapt", "AMDGPU"]
 FiniteElementContainersCUDAExt = ["Adapt", "CUDA"]
 FiniteElementContainersExodusExt = "Exodus"
 
 [compat]
 AcceleratedKernels = "0.3"
 Adapt = "3, 4"
+AMDGPU = "1"
 Aqua = "0.8"
 Atomix = "1"
 CUDA = "5"
 DocStringExtensions = "0.9"
 Exodus = "0.13"
 JET = "0.9"
-Krylov = "0.9"
 KernelAbstractions = "0.9"
+Krylov = "0.9"
 LinearAlgebra = "1"
 Parameters = "0.12"
 Reexport = "1"
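Because AMDGPU and Adapt are declared as weak dependencies, the new extension module is compiled and loaded only when both packages are present in the session alongside FiniteElementContainers. A small illustrative check (not part of the diff), assuming Julia 1.9+ where package extensions and Base.get_extension are available:

using FiniteElementContainers
using Adapt, AMDGPU   # loading the weak dependencies triggers the extension

# Returns the extension module once it has been loaded, or `nothing` otherwise.
Base.get_extension(FiniteElementContainers, :FiniteElementContainersAMDGPUExt)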
26 changes: 26 additions & 0 deletions ext/FiniteElementContainersAMDGPUExt.jl
@@ -0,0 +1,26 @@
module FiniteElementContainersAMDGPUExt

using Adapt
using AMDGPU
using FiniteElementContainers
using KernelAbstractions

# Move a container to an AMD GPU by adapting its array storage to ROCArray.
FiniteElementContainers.gpu(x) = adapt_structure(ROCArray, x)

function AMDGPU.rocSPARSE.ROCSparseMatrixCSC(asm::SparseMatrixAssembler)
  # NOTE: the AMD backend in KernelAbstractions is `ROCBackend`, exported by AMDGPU.jl.
  # An assert like the one below would provide error checking and device consistency:
  # @assert typeof(get_backend(asm)) <: ROCBackend "Assembler is not on a ROCm device"
  @assert length(asm.pattern.cscnzval) > 0 "Need to assemble the assembler once with SparseArrays.sparse!(assembler)"
  @assert all(x -> x != zero(eltype(asm.pattern.cscnzval)), asm.pattern.cscnzval) "Need to assemble the assembler once with SparseArrays.sparse!(assembler)"
  n_dofs = FiniteElementContainers.num_unknowns(asm.dof)
  return AMDGPU.rocSPARSE.ROCSparseMatrixCSC(
    asm.pattern.csccolptr,
    asm.pattern.cscrowval,
    asm.pattern.cscnzval,
    (n_dofs, n_dofs)
  )
end

end # module
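
For context, a hypothetical usage sketch of the new conversion (not part of the diff). It assumes `asm` is a SparseMatrixAssembler that has already been constructed on the CPU, and `U` is a placeholder name for any other container to be moved to the device:

using Adapt, AMDGPU, SparseArrays
using FiniteElementContainers

SparseArrays.sparse!(asm)                          # fill the CSC pattern once, as the asserts require
K_roc = AMDGPU.rocSPARSE.ROCSparseMatrixCSC(asm)   # wrap the pattern arrays as a rocSPARSE CSC matrix
U_roc = FiniteElementContainers.gpu(U)             # adapt another container `U` to ROCArray storage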