
Commit

Add GPU tests (#222)
* Add GPU tests

* Add CUDA version compatible with 1.6

* Update the GPU tests to use buildkite

---------

Co-authored-by: Alexis Montoison <alexis.montoison@polymtl.ca>
tmigot and amontoison committed May 7, 2024
1 parent be41bb6 commit 53a494c
Showing 26 changed files with 161 additions and 95 deletions.
2 changes: 1 addition & 1 deletion .buildkite/pipeline.yml
@@ -7,6 +7,6 @@ steps:
queue: "juliagpu"
cuda: "*"
command: |
-julia --color=yes --project -e 'using Pkg; Pkg.add("CUDA"); Pkg.instantiate(); using CUDA'
+julia --color=yes --project -e 'using Pkg; Pkg.add("CUDA"); Pkg.add("NLPModels"); Pkg.add("NLPModelsTest"); Pkg.instantiate()'
julia --color=yes --project -e 'include("test/gpu.jl")'
timeout_in_minutes: 30
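
For reference, the two pipeline commands amount to the following plain Julia (a sketch of what the CI step runs via `julia --color=yes --project -e '...'`):

```julia
using Pkg
# First command: add the test-only dependencies on top of the project
# environment, then resolve it.
Pkg.add("CUDA"); Pkg.add("NLPModels"); Pkg.add("NLPModelsTest")
Pkg.instantiate()

# Second command: run the GPU test script from the repository root.
include("test/gpu.jl")
```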
10 changes: 5 additions & 5 deletions src/ad_api.jl
@@ -139,10 +139,10 @@ Jtprod!(nlp::AbstractNLPModel, Jtv, c, x, v, ::Val{:F}) = jtprod_residual!(nlp,
function Hvprod!(nlp::AbstractNLPModel, Hv, x, v, ℓ, ::Val{:obj}, obj_weight)
return hprod!(nlp, x, v, Hv, obj_weight = obj_weight)
end
-function Hvprod!(nlp::AbstractNLPModel, Hv, x, v, ℓ, ::Val{:lag}, y, obj_weight)
+function Hvprod!(nlp::AbstractNLPModel, Hv, x::S, v, ℓ, ::Val{:lag}, y, obj_weight) where {S}
if nlp.meta.nlin > 0
# y is of length nnln, and hprod expects ncon...
-yfull = zeros(eltype(x), nlp.meta.ncon)
+yfull = fill!(S(undef, nlp.meta.ncon), 0)
k = 0
for i in nlp.meta.nln
k += 1
@@ -200,14 +200,14 @@ end
function NLPModels.hess_coord!(
nlp::AbstractNLPModel,
::ADModel,
-x::AbstractVector,
+x::S,
y::AbstractVector,
obj_weight::Real,
vals::AbstractVector,
-)
+) where {S}
if nlp.meta.nlin > 0
# y is of length nnln, and hess expects ncon...
-yfull = zeros(eltype(x), nlp.meta.ncon)
+yfull = fill!(S(undef, nlp.meta.ncon), 0)
k = 0
for i in nlp.meta.nln
k += 1
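
The recurring change in this file is the allocation idiom: `zeros(eltype(x), n)` always returns a CPU `Vector`, while `fill!(S(undef, n), 0)` allocates in the caller's storage type `S`, so a `CuArray` input yields a `CuArray` buffer. A minimal sketch (`zeros_like` is a hypothetical name; the commented lines assume CUDA.jl and a working GPU):

```julia
# Storage-aware zero vector: same array family as S, zero-initialized.
zeros_like(::Type{S}, n::Integer) where {S} = fill!(S(undef, n), 0)

zeros_like(Vector{Float64}, 3)      # 3-element Vector{Float64}
# using CUDA
# zeros_like(CuVector{Float32}, 3)  # 3-element CuVector{Float32}, stays on the GPU
```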
9 changes: 5 additions & 4 deletions src/forward.jl
@@ -195,9 +195,10 @@ function ForwardDiffADHvprod(
f,
ncon::Integer = 0,
c!::Function = (args...) -> [];
-x0::AbstractVector{T} = rand(nvar),
+x0::S = rand(nvar),
kwargs...,
-) where {T}
+) where {S}
+T = eltype(S)
function lag(z; nvar = nvar, ncon = ncon, f = f, c! = c!)
cx, x, y, ob = view(z, 1:ncon),
view(z, (ncon + 1):(nvar + ncon)),
@@ -221,8 +222,8 @@ function ForwardDiffADHvprod(
ForwardDiff.gradient!(gz, lag, z, cfg)
return gz
end
-longv = zeros(T, ntotal)
-Hvp = zeros(T, ntotal)
+longv = fill!(S(undef, ntotal), 0)
+Hvp = fill!(S(undef, ntotal), 0)

# unconstrained Hessian
tagf = ForwardDiff.Tag{typeof(f), T}
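
The signature change in `ForwardDiffADHvprod` swaps the element-type parameter for a storage-type parameter and recovers the scalar type with `T = eltype(S)`. A sketch of the pattern with a hypothetical `make_buffers`:

```julia
function make_buffers(nvar::Integer; x0::S = rand(nvar)) where {S}
    T = eltype(S)                     # scalar type recovered from the storage type
    buf = fill!(S(undef, nvar), 0)    # buffer in the same storage family as x0
    return T, buf
end

make_buffers(3)                       # (Float64, [0.0, 0.0, 0.0])
# make_buffers(3; x0 = CUDA.rand(3))  # (Float32, CuVector{Float32}), with CUDA loaded
```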
7 changes: 4 additions & 3 deletions src/sparse_diff_tools.jl
@@ -13,16 +13,17 @@
f,
ncon,
c!;
-x0::AbstractVector{T} = rand(nvar),
+x0::S = rand(nvar),
alg::SparseDiffTools.SparseDiffToolsColoringAlgorithm = SparseDiffTools.GreedyD1Color(),
kwargs...,
-) where {T}
+) where {S}
+T = eltype(S)
output = similar(x0, ncon)
J = compute_jacobian_sparsity(c!, output, x0)
colors = sparse_matrix_colors(J, alg)
jac = SparseMatrixCSC{T, Int}(J.m, J.n, J.colptr, J.rowval, T.(J.nzval))

-dx = zeros(T, ncon)
+dx = fill!(S(undef, ncon), 0)
cfJ = SparseDiffTools.ForwardColorJacCache(c!, x0, colorvec = colors, dx = dx, sparsity = jac)
SDTSparseADJacobian(cfJ)
end
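
Note that `output = similar(x0, ncon)` (unchanged above) is the other storage-preserving allocator in play: unlike `fill!(S(undef, n), 0)` it leaves the memory uninitialized, which is fine when the buffer is fully overwritten, as it is here by `c!`. For example:

```julia
x = rand(Float32, 5)
out = similar(x, 3)   # Vector{Float32}; contents are undefined until written
out .= 0              # initialize explicitly only when required
```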
22 changes: 11 additions & 11 deletions src/sparse_hessian.jl
@@ -19,12 +19,13 @@ function SparseADHessian(
f,
ncon,
c!;
-x0::AbstractVector{T} = rand(nvar),
+x0::S = rand(nvar),
alg = ColPackColoration(),
kwargs...,
-) where {T}
-S = compute_hessian_sparsity(f, nvar, c!, ncon)
-H = ncon == 0 ? S : S[1:nvar, 1:nvar]
+) where {S}
+T = eltype(S)
+Hs = compute_hessian_sparsity(f, nvar, c!, ncon)
+H = ncon == 0 ? Hs : Hs[1:nvar, 1:nvar]

colors = sparse_matrix_colors(H, alg)
ncolors = maximum(colors)
@@ -59,10 +60,9 @@ function SparseADHessian(
ForwardDiff.gradient!(gz, lag, z, cfg)
return gz
end
-longv = zeros(T, ntotal)
-Hvp = zeros(T, ntotal)
-
-y = zeros(T, ncon)
+longv = fill!(S(undef, ntotal), 0)
+Hvp = fill!(S(undef, ntotal), 0)
+y = fill!(S(undef, ncon), 0)

return SparseADHessian(d, rowval, colptr, colors, ncolors, res, lz, glz, sol, longv, Hvp, ∇φ!, y)
end
@@ -95,8 +95,8 @@ function SparseReverseADHessian(
alg = ColPackColoration(),
kwargs...,
) where {T}
-S = compute_hessian_sparsity(f, nvar, c!, ncon)
-H = ncon == 0 ? S : S[1:nvar, 1:nvar]
+Hs = compute_hessian_sparsity(f, nvar, c!, ncon)
+H = ncon == 0 ? Hs : Hs[1:nvar, 1:nvar]

colors = sparse_matrix_colors(H, alg)
ncolors = maximum(colors)
@@ -138,7 +138,7 @@
end
Hv_temp = similar(x0)

-y = zeros(T, ncon)
+y = similar(x0, ncon)
return SparseReverseADHessian(
d,
rowval,
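
The `S` → `Hs` rename is not cosmetic: once the method takes the static parameter `S`, a local variable may no longer shadow it. A minimal reproduction, assuming nothing beyond Base:

```julia
function f(x::S) where {S}
    S = 1   # ERROR: local variable name "S" conflicts with a static parameter
    return S
end
```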
6 changes: 3 additions & 3 deletions src/sparse_sym.jl
@@ -102,9 +102,9 @@ function SparseSymbolicsADHessian(
f,
ncon,
c!;
-x0::AbstractVector{T} = rand(nvar),
+x0::S = rand(nvar),
kwargs...,
-) where {T}
+) where {S}
Symbolics.@variables xs[1:nvar], μs
xsi = Symbolics.scalarize(xs)
fun = μs * f(xsi)
@@ -122,7 +122,7 @@
# cfH is a Tuple{Expr, Expr}, cfH[2] is the in-place function
# that we need to update a vector `vals` with the nonzeros of ∇²ℓ(x, y, μ).
cfH = Symbolics.build_function(vals, xsi, ysi, μs, expression = Val{false})
-y = zeros(T, ncon)
+y = fill!(S(undef, ncon), 0)
return SparseSymbolicsADHessian(nnzh, rows, cols, y, cfH[2])
end

2 changes: 2 additions & 0 deletions test/Project.toml
@@ -1,4 +1,5 @@
[deps]
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -14,6 +15,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
+CUDA = "4, 5"
Enzyme = "0.10, 0.11, 0.12"
ForwardDiff = "0.10"
ManualNLPModels = "0.1"
30 changes: 29 additions & 1 deletion test/gpu.jl
@@ -1,3 +1,31 @@
-using CUDA, Test
+using CUDA, LinearAlgebra, SparseArrays, Test
+using ADNLPModels, NLPModels, NLPModelsTest
+
+for problem in NLPModelsTest.nlp_problems ∪ ["GENROSE"]
+include("nlp/problems/$(lowercase(problem)).jl")
+end
+for problem in NLPModelsTest.nls_problems
+include("nls/problems/$(lowercase(problem)).jl")
+end

@test CUDA.functional()

+@testset "Checking NLPModelsTest (NLP) tests with $backend - GPU multiple precision" for backend in keys(ADNLPModels.predefined_backend)
+@testset "Checking GPU multiple precision on problem $problem" for problem in NLPModelsTest.nlp_problems
+nlp_from_T = eval(Meta.parse(lowercase(problem) * "_autodiff"))
+CUDA.allowscalar() do
+# sparse Jacobian/Hessian doesn't work here
+multiple_precision_nlp_array(T -> nlp_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jth_hprod, hprod, jprod], linear_api = true)
+end
+end
+end

+@testset "Checking NLPModelsTest (NLS) tests with $backend - GPU multiple precision" for backend in keys(ADNLPModels.predefined_backend)
+@testset "Checking GPU multiple precision on problem $problem" for problem in NLPModelsTest.nls_problems
+nls_from_T = eval(Meta.parse(lowercase(problem) * "_autodiff"))
+CUDA.allowscalar() do
+# sparse Jacobian/Hessian doesn't work here
+multiple_precision_nls_array(T -> nls_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian, jacobian_residual_backend = ADNLPModels.ForwardDiffADJacobian, hessian_residual_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jprod, jprod_residual, hprod_residual], linear_api = true)
+end
+end
+end
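
The `CUDA.allowscalar() do ... end` wrapper deserves a note: scalar indexing of a `CuArray` errors by default in non-interactive sessions because each access is a host-device round trip, and the do-block form re-enables it only for its body, which is acceptable in tests. A sketch:

```julia
using CUDA

x = CUDA.zeros(Float32, 4)
# x[1]               # errors in scripts/CI: scalar indexing is disallowed
CUDA.allowscalar() do
    x[1] = 1f0       # permitted here, but each access is a device round trip
end
```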
8 changes: 8 additions & 0 deletions test/nlp/nlpmodelstest.jl
@@ -18,6 +18,14 @@
@testset "Check multiple precision" begin
multiple_precision_nlp(nlp_from_T, exclude = [], linear_api = true)
end
+@testset "Check multiple precision GPU" begin
+if CUDA.functional()
+CUDA.allowscalar() do
+# sparse Jacobian/Hessian doesn't work here
+multiple_precision_nlp_array(T -> nlp_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jth_hprod, hprod, jprod], linear_api = true)
+end
+end
+end
@testset "Check view subarray" begin
view_subarray_nlp(nlp_ad, exclude = [])
end
6 changes: 4 additions & 2 deletions test/nlp/problems/brownden.jl
@@ -1,7 +1,9 @@
export brownden_autodiff

-function brownden_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = T[25.0; 5.0; -5.0; -1.0]
+brownden_autodiff(::Type{T}; kwargs...) where {T <: Number} = brownden_autodiff(Vector{T}; kwargs...)
+function brownden_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+T = eltype(S)
+x0 = S([25.0; 5.0; -5.0; -1.0])
f(x) = begin
s = zero(T)
for i = 1:20
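
`brownden` sets the pattern every test problem below follows: the old scalar-type entry point is kept and forwarded to a new storage-type entry point. A condensed sketch with a hypothetical `demo_autodiff`:

```julia
# Old API still works: demo_autodiff(Float32) means Vector{Float32} storage.
demo_autodiff(::Type{T}; kwargs...) where {T <: Number} = demo_autodiff(Vector{T}; kwargs...)

function demo_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
    x0 = S([25.0; 5.0; -5.0; -1.0])   # one conversion into the requested storage
    return x0                          # the real functions build an ADNLPModel here
end

demo_autodiff(Float32)                # 4-element Vector{Float32}
# demo_autodiff(CuVector{Float64})    # 4-element CuVector{Float64}, with CUDA loaded
```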
9 changes: 5 additions & 4 deletions test/nlp/problems/hs10.jl
@@ -1,11 +1,12 @@
export hs10_autodiff

-function hs10_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = T[-10.0; 10.0]
+hs10_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs10_autodiff(Vector{T}; kwargs...)
+function hs10_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+x0 = S([-10; 10])
f(x) = x[1] - x[2]
c(x) = [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1]
-lcon = T[0.0]
-ucon = T[Inf]
+lcon = S([0])
+ucon = S([Inf])

return ADNLPModel(f, x0, c, lcon, ucon, name = "hs10_autodiff"; kwargs...)
end
9 changes: 5 additions & 4 deletions test/nlp/problems/hs11.jl
@@ -1,11 +1,12 @@
export hs11_autodiff

-function hs11_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = T[4.9; 0.1]
+hs11_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs11_autodiff(Vector{T}; kwargs...)
+function hs11_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+x0 = S([49 // 10; 1 // 10])
f(x) = (x[1] - 5)^2 + x[2]^2 - 25
c(x) = [-x[1]^2 + x[2]]
-lcon = T[-Inf]
-ucon = T[0.0]
+lcon = S([-Inf])
+ucon = S([0])

return ADNLPModel(f, x0, c, lcon, ucon, name = "hs11_autodiff"; kwargs...)
end
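
A small detail in `hs11` (and `hs6` below): the starting points become rational literals, e.g. `S([49 // 10; 1 // 10])` instead of `T[4.9; 0.1]`, so each value is rounded once, directly to the target eltype, rather than first to `Float64` and then again. For instance:

```julia
Float16(49 // 10)   # rounds the exact rational once to Float16
Float16(4.9)        # rounds the Float64 approximation of 4.9 a second time
```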
13 changes: 7 additions & 6 deletions test/nlp/problems/hs13.jl
@@ -1,16 +1,17 @@
export hs13_autodiff

-function hs13_autodiff(::Type{T} = Float64; kwargs...) where {T}
+hs13_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs13_autodiff(Vector{T}; kwargs...)
+function hs13_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
function f(x)
return (x[1] - 2)^2 + x[2]^2
end
-x0 = -2 * ones(T, 2)
-lvar = zeros(T, 2)
-uvar = T(Inf) * ones(T, 2)
+x0 = fill!(S(undef, 2), -2)
+lvar = fill!(S(undef, 2), 0)
+uvar = fill!(S(undef, 2), Inf)
function c(x)
return [(1 - x[1])^3 - x[2]]
end
-lcon = zeros(T, 1)
-ucon = T(Inf) * ones(T, 1)
+lcon = fill!(S(undef, 1), 0)
+ucon = fill!(S(undef, 1), Inf)
return ADNLPModels.ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon, name = "hs13_autodiff"; kwargs...)
end
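
`hs13` shows the `fill!` idiom extends beyond zeros: fill values such as `-2` and `Inf` are converted to `eltype(S)` on assignment, replacing the old `-2 * ones(T, 2)` and `T(Inf) * ones(T, 2)` idioms without a temporary array. For example:

```julia
fill!(Vector{Float32}(undef, 2), Inf)   # Float32[Inf, Inf], no temporary ones(T, 2)
```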
11 changes: 6 additions & 5 deletions test/nlp/problems/hs14.jl
@@ -1,15 +1,16 @@
export hs14_autodiff

-function hs14_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = T[2.0; 2.0]
+hs14_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs14_autodiff(Vector{T}; kwargs...)
+function hs14_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+x0 = S([2; 2])
f(x) = (x[1] - 2)^2 + (x[2] - 1)^2
c(x) = [-x[1]^2 / 4 - x[2]^2 + 1]
-lcon = T[-1; 0.0]
-ucon = T[-1; Inf]
+lcon = S([-1; 0])
+ucon = S([-1; Inf])

clinrows = [1, 1]
clincols = [1, 2]
-clinvals = T[1, -2]
+clinvals = S([1, -2])

return ADNLPModel(
f,
9 changes: 5 additions & 4 deletions test/nlp/problems/hs5.jl
@@ -1,10 +1,11 @@
export hs5_autodiff

-function hs5_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = zeros(T, 2)
+hs5_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs5_autodiff(Vector{T}; kwargs...)
+function hs5_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+x0 = fill!(S(undef, 2), 0)
f(x) = sin(x[1] + x[2]) + (x[1] - x[2])^2 - 3x[1] / 2 + 5x[2] / 2 + 1
-l = T[-1.5; -3.0]
-u = T[4.0; 3.0]
+l = S([-1.5; -3.0])
+u = S([4.0; 3.0])

return ADNLPModel(f, x0, l, u, name = "hs5_autodiff"; kwargs...)
end
9 changes: 5 additions & 4 deletions test/nlp/problems/hs6.jl
@@ -1,11 +1,12 @@
export hs6_autodiff

-function hs6_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = T[-1.2; 1.0]
+hs6_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs6_autodiff(Vector{T}; kwargs...)
+function hs6_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+x0 = S([-12 // 10; 1])
f(x) = (1 - x[1])^2
c(x) = [10 * (x[2] - x[1]^2)]
-lcon = T[0.0]
-ucon = T[0.0]
+lcon = fill!(S(undef, 1), 0)
+ucon = fill!(S(undef, 1), 0)

return ADNLPModel(f, x0, c, lcon, ucon, name = "hs6_autodiff"; kwargs...)
end
12 changes: 7 additions & 5 deletions test/nlp/problems/lincon.jl
@@ -1,22 +1,24 @@
export lincon_autodiff

-function lincon_autodiff(::Type{T} = Float64; kwargs...) where {T}
+lincon_autodiff(::Type{T}; kwargs...) where {T <: Number} = lincon_autodiff(Vector{T}; kwargs...)
+function lincon_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+T = eltype(S)
A = T[1 2; 3 4]
b = T[5; 6]
B = diagm(T[3 * i for i = 3:5])
c = T[1; 2; 3]
C = T[0 -2; 4 0]
d = T[1; -1]

-x0 = zeros(T, 15)
+x0 = fill!(S(undef, 15), 0)
f(x) = sum(i + x[i]^4 for i = 1:15)

-lcon = T[22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)]
-ucon = T[22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c]
+lcon = S([22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)])
+ucon = S([22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c])

clinrows = [1, 2, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11]
clincols = [15, 10, 11, 12, 13, 14, 8, 9, 7, 6, 1, 1, 2, 2, 3, 4, 5]
-clinvals = vcat(T(15), c, d, b, C[1, 2], C[2, 1], A[:], diag(B))
+clinvals = S(vcat(T(15), c, d, b, C[1, 2], C[2, 1], A[:], diag(B)))

return ADNLPModel(
f,
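
`lincon` adds one more wrinkle: the constant data is still assembled on the CPU with array literals, `vcat`, and `diag`, and only the final vector is converted with `S(...)`, so a GPU target pays a single bulk host-to-device copy rather than many scalar writes. Sketch:

```julia
using LinearAlgebra

T = Float64
c = T[1; 2; 3]; d = T[1; -1]; B = diagm(T[9, 12, 15])
vals = vcat(T(15), c, d, diag(B))   # assembled on the CPU
# CuVector{T}(vals)                 # one bulk transfer to the device (with CUDA loaded)
```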
11 changes: 6 additions & 5 deletions test/nlp/problems/linsv.jl
@@ -1,14 +1,15 @@
export linsv_autodiff

-function linsv_autodiff(::Type{T} = Float64; kwargs...) where {T}
-x0 = zeros(T, 2)
+linsv_autodiff(::Type{T}; kwargs...) where {T <: Number} = linsv_autodiff(Vector{T}; kwargs...)
+function linsv_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+x0 = fill!(S(undef, 2), 0)
f(x) = x[1]
-lcon = T[3.0; 1.0]
-ucon = T[Inf; Inf]
+lcon = S([3; 1])
+ucon = S([Inf; Inf])

clinrows = [1, 1, 2]
clincols = [1, 2, 2]
-clinvals = T[1, 1, 1]
+clinvals = S([1, 1, 1])

return ADNLPModel(
f,
