diff --git a/Project.toml b/Project.toml
index b0cd9950..14d29d97 100644
--- a/Project.toml
+++ b/Project.toml
@@ -39,7 +39,7 @@ Adapt = "4"
 BandedMatrices = "1"
 BlockBandedMatrices = "0.13"
 CUDA = "5"
-CUDSS = "0.5, 0.6"
+CUDSS = "0.6.1"
 ChainRules = "1"
 ChainRulesCore = "1"
 ChainRulesTestUtils = "1"
diff --git a/test/gpu/Project.toml b/test/gpu/Project.toml
index 353250f1..1ef4f102 100644
--- a/test/gpu/Project.toml
+++ b/test/gpu/Project.toml
@@ -1,3 +1,4 @@
 [deps]
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
-Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
\ No newline at end of file
+CUDSS = "45b445bb-4962-46a0-9369-b4df9d0f772e"
+Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
diff --git a/test/gpu/cuda.jl b/test/gpu/cuda.jl
index f57bdafa..2651a638 100644
--- a/test/gpu/cuda.jl
+++ b/test/gpu/cuda.jl
@@ -1,7 +1,20 @@
-using CUDA
+using CUDSS, CUDA, SparseArrays, LinearAlgebra
+using CUDA.CUSPARSE
 using ArrayInterface
 using Test
 
-# Test whether lu_instance throws an error when invoked with an gpu array
-@test !isa(try ArrayInterface.lu_instance(CUDA.CuArray([1.f0 1.f0; 1.f0 1.f0])) catch ex ex end, Exception)
+A_cpu = Float32[1 0; 0 1]
+A_dense = CuMatrix(A_cpu)
+A_sparse = CuSparseMatrixCSR(sparse(A_cpu))
+
+# Test that lu_instance returns a usable factorization instance for dense and sparse GPU arrays
+lu_inst_dense = ArrayInterface.lu_instance(A_dense)
+lu_inst_sparse = ArrayInterface.lu_instance(A_sparse)
+
+# Test that lu! can refactorize in place, reusing the instance as scratch
+lu_sparse = lu!(lu_inst_sparse, A_sparse)
+
+# Test that the resulting factorization solves correctly
+b = CuVector([1f0, 1f0])
+@test CUDA.@allowscalar lu_sparse \ b == [1, 1]
 
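For context, a minimal sketch of the usage pattern the updated test exercises, assuming the lu_instance -> lu! -> backslash flow shown in the test above; the matrix data and the variable names A, F, b, and x are illustrative only and not part of this change:

using CUDA, CUDA.CUSPARSE, CUDSS, SparseArrays, LinearAlgebra
using ArrayInterface

# Small sparse system on the GPU (illustrative data).
A = CuSparseMatrixCSR(sparse(Float32[4 1 0; 1 4 1; 0 1 4]))

# Pre-allocate a factorization object of the matching type; lu_instance is
# meant to be cheap and is used here only to fix the output type up front.
F = ArrayInterface.lu_instance(A)

# Factorize in place, reusing the instance, then solve (mirrors the test above).
F = lu!(F, A)
b = CuVector(Float32[1, 2, 3])
x = F \ b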