
Commit

test: use LuxCUDA and gpu_device for using GPUs
sathvikbhagavan committed Jan 18, 2024
1 parent 3bf7f2f commit ace150f
Showing 2 changed files with 10 additions and 10 deletions.
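The headline change is the device setup in test/NNPDE_tests_gpu_Lux.jl: the test now loads LuxCUDA and obtains a device functor via gpu_device() instead of piping through gpu or calling cu directly. Below is a minimal sketch of that pattern; the x_gpu line is an illustrative extra, and a working CUDA installation is assumed (without one, gpu_device() is expected to fall back to the CPU with a warning).

    using Lux, LuxCUDA        # LuxCUDA re-exports CUDA, so CUDA.allowscalar still resolves

    CUDA.allowscalar(false)   # keep scalar GPU indexing disabled, as in the test
    const gpud = gpu_device() # device functor; replaces the old cu(...)-based setup

    x_gpu = gpud(rand(Float32, 4))  # move an array to the selected device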
8 changes: 4 additions & 4 deletions test/NNPDE_tests_gpu.jl
@@ -25,7 +25,7 @@ eq = Dθ(u(θ)) ~ θ^3 + 2.0f0 * θ + (θ^2) * ((1.0f0 + 3 * (θ^2)) / (1.0f0 + θ + (θ^3))) -
u(θ) * (θ + ((1.0f0 + 3.0f0 * (θ^2)) / (1.0f0 + θ + θ^3)))

# Initial and boundary conditions
bcs = [u(0.0) ~ 1.0f0]
bcs = [u(0.0f0) ~ 1.0f0]

# Space and time domains
domains = [θ ∈ Interval(0.0f0, 1.0f0)]
@@ -85,7 +85,7 @@ chain = Flux.Chain(Dense(2, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, 1)) |> gpu
Dense(inner, 1)) |> gpu |> f64

strategy = NeuralPDE.StochasticTraining(500)
discretization = NeuralPDE.PhysicsInformedNN(chain,
@@ -138,7 +138,7 @@ chain = Flux.Chain(Dense(2, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, 1)) |> gpu
Dense(inner, 1)) |> gpu |> f64

strategy = NeuralPDE.QuasiRandomTraining(500; #points
sampling_alg = SobolSample(),
@@ -205,7 +205,7 @@ chain = Flux.Chain(Dense(3, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, inner, Flux.σ),
Dense(inner, 1)) |> gpu
Dense(inner, 1)) |> gpu |> f64

strategy = NeuralPDE.GridTraining(0.05)
discretization = NeuralPDE.PhysicsInformedNN(chain,
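In test/NNPDE_tests_gpu.jl the Flux chains keep Flux's gpu transfer but are now additionally piped through f64, presumably so the parameters end up as Float64 on the device. A minimal sketch of that pattern, with a hypothetical smaller inner width:

    using Flux, CUDA  # a functional CUDA setup is assumed so that gpu actually moves the weights

    inner = 8         # hypothetical width; the test uses a larger value
    chain = Flux.Chain(Dense(2, inner, Flux.σ),
                       Dense(inner, inner, Flux.σ),
                       Dense(inner, 1)) |> gpu |> f64  # move to the GPU, then promote parameters to Float64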
12 changes: 6 additions & 6 deletions test/NNPDE_tests_gpu_Lux.jl
@@ -1,7 +1,7 @@
using Lux, ComponentArrays, OptimizationOptimisers
using Test, NeuralPDE
using Optimization
using CUDA, QuasiMonteCarlo
using LuxCUDA, QuasiMonteCarlo
import ModelingToolkit: Interval, infimum, supremum

using Random
@@ -12,7 +12,7 @@ callback = function (p, l)
return false
end
CUDA.allowscalar(false)
#const gpuones = cu(ones(1))
const gpud = gpu_device()

## ODE
println("ode")
@@ -41,7 +41,7 @@ chain = Chain(Dense(1, inner, Lux.σ),
Dense(inner, 1))

strategy = NeuralPDE.GridTraining(dt)
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpud
discretization = NeuralPDE.PhysicsInformedNN(chain,
strategy;
init_params = ps)
@@ -90,7 +90,7 @@ chain = Lux.Chain(Dense(2, inner, Lux.σ),
Dense(inner, 1))

strategy = NeuralPDE.StochasticTraining(500)
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpud .|> Float64
discretization = NeuralPDE.PhysicsInformedNN(chain,
strategy;
init_params = ps)
@@ -148,7 +148,7 @@ strategy = NeuralPDE.QuasiRandomTraining(500; #points
sampling_alg = SobolSample(),
resampling = false,
minibatch = 30)
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpud .|> Float64
discretization = NeuralPDE.PhysicsInformedNN(chain,
strategy;
init_params = ps)
@@ -213,7 +213,7 @@ chain = Lux.Chain(Dense(3, inner, Lux.σ),
Dense(inner, 1))

strategy = NeuralPDE.GridTraining(0.05)
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpud .|> Float64
discretization = NeuralPDE.PhysicsInformedNN(chain,
strategy;
init_params = ps)
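For the Lux test file, parameter initialization now routes through the gpud functor defined at the top of the test. A self-contained sketch of that pipeline with hypothetical layer sizes; the trailing .|> Float64 mirrors the lines above that promote the parameters to double precision:

    using Lux, LuxCUDA, ComponentArrays, Random

    const gpud = gpu_device()

    inner = 8  # hypothetical width
    chain = Chain(Dense(2, inner, Lux.σ), Dense(inner, 1))

    # NamedTuple parameters -> ComponentArray -> GPU -> Float64 elements
    ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpud .|> Float64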
