diff --git a/.github/workflows/CI_NonlinearSolve.yml b/.github/workflows/CI_NonlinearSolve.yml
index 64db32335..aa31f473d 100644
--- a/.github/workflows/CI_NonlinearSolve.yml
+++ b/.github/workflows/CI_NonlinearSolve.yml
@@ -36,6 +36,7 @@ jobs:
           - wrappers
           - misc
           - nopre
+          - trim
         version:
           - "1"
           - "1.11"
@@ -48,6 +49,11 @@ jobs:
           # Don't run nopre tests on prerelease Julia
           - group: nopre
             version: "pre"
+          # Don't run trim tests on Julia versions below 1.12
+          - group: trim
+            version: "1.11"
+          - group: trim
+            version: "lts"
     uses: LuxDL/Lux.jl/.github/workflows/CommonCI.yml@main
     with:
      julia_version: ${{ matrix.version }}
diff --git a/Project.toml b/Project.toml
index 8bb3832e2..305978880 100644
--- a/Project.toml
+++ b/Project.toml
@@ -148,6 +148,7 @@ ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 SciMLLogging = "a6db7da4-7206-11f0-1eab-35f2a5dbe1d1"
 SIAMFANLEquations = "084e46ad-d928-497d-ad5e-07fa361a48c4"
+SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
 SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
 SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35"
 SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412"
@@ -159,4 +160,4 @@ Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [targets]
-test = ["Aqua", "BandedMatrices", "BenchmarkTools", "ExplicitImports", "FastLevenbergMarquardt", "FixedPointAcceleration", "Hwloc", "InteractiveUtils", "LeastSquaresOptim", "LineSearches", "MINPACK", "NLSolvers", "NLsolve", "NaNMath", "NonlinearProblemLibrary", "OrdinaryDiffEqTsit5", "PETSc", "Pkg", "PolyesterForwardDiff", "Random", "ReTestItems", "SIAMFANLEquations", "SparseConnectivityTracer", "SparseMatrixColorings", "SpeedMapping", "StableRNGs", "StaticArrays", "Sundials", "Test", "Zygote", "ReverseDiff", "Tracker", "SciMLLogging"]
+test = ["Aqua", "BandedMatrices", "BenchmarkTools", "ExplicitImports", "FastLevenbergMarquardt", "FixedPointAcceleration", "Hwloc", "InteractiveUtils", "LeastSquaresOptim", "LineSearches", "MINPACK", "NLSolvers", "NLsolve", "NaNMath", "NonlinearProblemLibrary", "OrdinaryDiffEqTsit5", "PETSc", "Pkg", "PolyesterForwardDiff", "Random", "ReTestItems", "SafeTestsets", "SIAMFANLEquations", "SparseConnectivityTracer", "SparseMatrixColorings", "SpeedMapping", "StableRNGs", "StaticArrays", "Sundials", "Test", "Zygote", "ReverseDiff", "Tracker", "SciMLLogging"]
diff --git a/test/runtests.jl b/test/runtests.jl
index a84b3f88b..62e2386f7 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,4 +1,4 @@
-using ReTestItems, NonlinearSolve, Hwloc, InteractiveUtils, Pkg
+using ReTestItems, Hwloc, InteractiveUtils, Pkg
 
 @info sprint(InteractiveUtils.versioninfo)
 
@@ -28,48 +28,55 @@ const GROUP = lowercase(get_from_test_args_or_env("GROUP", "all"))
 # To re-enable: change condition to `true` or `VERSION < v"1.13"`
 const ENZYME_ENABLED = VERSION < v"1.12"
 
-const EXTRA_PKGS = Pkg.PackageSpec[]
-if GROUP == "all" || GROUP == "downstream"
-    push!(EXTRA_PKGS, Pkg.PackageSpec("ModelingToolkit"))
-    push!(EXTRA_PKGS, Pkg.PackageSpec("SymbolicIndexingInterface"))
-end
-if GROUP == "all" || GROUP == "nopre"
-    # Only add Enzyme for nopre group if not on prerelease Julia and if enabled
-    if isempty(VERSION.prerelease) && ENZYME_ENABLED
-        push!(EXTRA_PKGS, Pkg.PackageSpec("Enzyme"))
-        push!(EXTRA_PKGS, Pkg.PackageSpec("Mooncake"))
-        push!(EXTRA_PKGS, Pkg.PackageSpec("SciMLSensitivity"))
+if GROUP != "trim"
+    using NonlinearSolve # trimming uses NonlinearSolve from a custom environment
+
+    const EXTRA_PKGS = Pkg.PackageSpec[]
+    if GROUP == "all" || GROUP == "downstream"
+        push!(EXTRA_PKGS, Pkg.PackageSpec("ModelingToolkit"))
+        push!(EXTRA_PKGS, Pkg.PackageSpec("SymbolicIndexingInterface"))
     end
-end
-if GROUP == "all" || GROUP == "cuda"
-    # Only add CUDA for cuda group if not on prerelease Julia
-    if isempty(VERSION.prerelease)
-        push!(EXTRA_PKGS, Pkg.PackageSpec("CUDA"))
+    if GROUP == "all" || GROUP == "nopre"
+        # Only add Enzyme for nopre group if not on prerelease Julia and if enabled
+        if isempty(VERSION.prerelease) && ENZYME_ENABLED
+            push!(EXTRA_PKGS, Pkg.PackageSpec("Enzyme"))
+            push!(EXTRA_PKGS, Pkg.PackageSpec("Mooncake"))
+            push!(EXTRA_PKGS, Pkg.PackageSpec("SciMLSensitivity"))
+        end
     end
-end
-
-length(EXTRA_PKGS) ≥ 1 && Pkg.add(EXTRA_PKGS)
+    if GROUP == "all" || GROUP == "cuda"
+        # Only add CUDA for cuda group if not on prerelease Julia
+        if isempty(VERSION.prerelease)
+            push!(EXTRA_PKGS, Pkg.PackageSpec("CUDA"))
+        end
+    end
+    length(EXTRA_PKGS) ≥ 1 && Pkg.add(EXTRA_PKGS)
 
-# Use sequential execution for wrapper tests to avoid parallel initialization issues
-const RETESTITEMS_NWORKERS = if GROUP == "wrappers"
-    0 # Sequential execution for wrapper tests
-else
-    tmp = get(ENV, "RETESTITEMS_NWORKERS", "")
-    isempty(tmp) &&
-        (tmp = string(min(ifelse(Sys.iswindows(), 0, Hwloc.num_physical_cores()), 4)))
-    parse(Int, tmp)
-end
-const RETESTITEMS_NWORKER_THREADS = begin
-    tmp = get(ENV, "RETESTITEMS_NWORKER_THREADS", "")
-    isempty(tmp) &&
-        (tmp = string(max(Hwloc.num_virtual_cores() ÷ max(RETESTITEMS_NWORKERS, 1), 1)))
-    parse(Int, tmp)
-end
+    # Use sequential execution for wrapper tests to avoid parallel initialization issues
+    const RETESTITEMS_NWORKERS = if GROUP == "wrappers"
+        0 # Sequential execution for wrapper tests
+    else
+        tmp = get(ENV, "RETESTITEMS_NWORKERS", "")
+        isempty(tmp) &&
+            (tmp = string(min(ifelse(Sys.iswindows(), 0, Hwloc.num_physical_cores()), 4)))
+        parse(Int, tmp)
+    end
+    const RETESTITEMS_NWORKER_THREADS = begin
+        tmp = get(ENV, "RETESTITEMS_NWORKER_THREADS", "")
+        isempty(tmp) &&
+            (tmp = string(max(Hwloc.num_virtual_cores() ÷ max(RETESTITEMS_NWORKERS, 1), 1)))
+        parse(Int, tmp)
+    end
 
-@info "Running tests for group: $(GROUP) with $(RETESTITEMS_NWORKERS) workers"
+    @info "Running tests for group: $(GROUP) with $(RETESTITEMS_NWORKERS) workers"
 
-ReTestItems.runtests(
-    NonlinearSolve; tags = (GROUP == "all" ? nothing : [Symbol(GROUP)]),
-    nworkers = RETESTITEMS_NWORKERS, nworker_threads = RETESTITEMS_NWORKER_THREADS,
-    testitem_timeout = 3600
-)
+    ReTestItems.runtests(
+        NonlinearSolve; tags = (GROUP == "all" ? nothing : [Symbol(GROUP)]),
+        nworkers = RETESTITEMS_NWORKERS, nworker_threads = RETESTITEMS_NWORKER_THREADS,
+        testitem_timeout = 3600
+    )
+elseif GROUP == "trim" && VERSION >= v"1.12.0-rc1" # trimming was introduced in Julia 1.12
+    Pkg.activate(joinpath(dirname(@__FILE__), "trim"))
+    Pkg.instantiate()
+    include("trim/runtests.jl")
+end
diff --git a/test/trim/Project.toml b/test/trim/Project.toml
new file mode 100644
index 000000000..6051e452c
--- /dev/null
+++ b/test/trim/Project.toml
@@ -0,0 +1,34 @@
+name = "TrimTest"
+uuid = "7e54ada7-ece5-4046-aa01-512d530850d8"
+
+[deps]
+ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
+CPUSummary = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9"
+DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
+ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
+JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae"
+NonlinearSolveFirstOrder = "5959db7a-ea39-4486-b5fe-2dd0bf03d60d"
+Polyester = "f517fe37-dbe3-4b94-8317-1923a5111588"
+PolyesterWeave = "1d0040c9-8b98-4ee7-8388-3f51789ca0ad"
+SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
+SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
+StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
+
+[sources]
+# Remove assert that triggers false positive for JET. Tracked at https://github.com/aviatesk/JET.jl/issues/736.
+ForwardDiff = {url = "https://github.com/RomeoV/ForwardDiff.jl", rev="rv/remove-quote-assert-string-interpolation"}
+NonlinearSolveFirstOrder = {path = "../../lib/NonlinearSolveFirstOrder"}
+
+
+[compat]
+ADTypes = "1.15.0"
+CPUSummary = "0.2.7"
+DiffEqBase = "6.179.0"
+ForwardDiff = "1.0.1"
+LinearAlgebra = "1.12.0"
+NonlinearSolveFirstOrder = "1.6.0"
+Polyester = "0.7.18"
+PolyesterWeave = "0.2.2"
+StaticArrays = "1.9.0"
diff --git a/test/trim/main_clean.jl b/test/trim/main_clean.jl
new file mode 100644
index 000000000..db868d96a
--- /dev/null
+++ b/test/trim/main_clean.jl
@@ -0,0 +1,8 @@
+using TrimTest
+
+function (@main)(argv::Vector{String})::Cint
+    λ = parse(Float64, argv[1])
+    sol = TrimTest.TestModuleClean.minimize(λ)
+    println(Core.stdout, sum(sol.u))
+    return 0
+end
diff --git a/test/trim/main_once_per_process.jl b/test/trim/main_once_per_process.jl
new file mode 100644
index 000000000..e2f6a6908
--- /dev/null
+++ b/test/trim/main_once_per_process.jl
@@ -0,0 +1,10 @@
+module MyModule
+include("./optimization_once_per_process.jl")
+end
+
+function (@main)(argv::Vector{String})::Cint
+    λ = parse(Float64, argv[1])
+    sol = MyModule.TestModuleTrimmable.minimize(λ)
+    println(Core.stdout, sum(sol.u))
+    return 0
+end
diff --git a/test/trim/main_segfault.jl b/test/trim/main_segfault.jl
new file mode 100644
index 000000000..7bd48e7bd
--- /dev/null
+++ b/test/trim/main_segfault.jl
@@ -0,0 +1,10 @@
+module MyModule
+include("./optimization_trimmable.jl")
+end
+
+function (@main)(argv::Vector{String})::Cint
+    λ = parse(Float64, argv[1])
+    sol = MyModule.TestModuleTrimmable.minimize(λ)
+    println(Core.stdout, sum(sol.u))
+    return 0
+end
diff --git a/test/trim/main_trimmable.jl b/test/trim/main_trimmable.jl
new file mode 100644
index 000000000..0ec717838
--- /dev/null
+++ b/test/trim/main_trimmable.jl
@@ -0,0 +1,8 @@
+using TrimTest
+
+function (@main)(argv::Vector{String})::Cint
+    λ = parse(Float64, argv[1])
+    sol = TrimTest.TestModuleTrimmable.minimize(λ)
+    println(Core.stdout, sum(sol.u))
+    return 0
+end
diff --git a/test/trim/optimization_clean.jl b/test/trim/optimization_clean.jl
new file mode 100644
index 000000000..ca6de20e3
--- /dev/null
+++ b/test/trim/optimization_clean.jl
@@ -0,0 +1,34 @@
+module TestModuleClean
+using NonlinearSolveFirstOrder
+using ADTypes: AutoForwardDiff
+using ForwardDiff
+using LinearAlgebra
+using StaticArrays
+using LinearSolve
+const LS = LinearSolve
+
+function f(u, p)
+    L, U = cholesky(p.Σ)
+    rhs = (u .* u .- p.λ)
+    # there are some issues currently with LinearSolve and triangular matrices,
+    # so we just make `L` dense here.
+    linprob = LinearProblem(Matrix(L), rhs)
+    alg = LS.GenericLUFactorization()
+    sol = LinearSolve.solve(linprob, alg)
+    return sol.u
+end
+
+struct MyParams{T, M}
+    λ::T
+    Σ::M
+end
+
+function minimize(x)
+    autodiff = AutoForwardDiff(; chunksize = 1)
+    alg = TrustRegion(; autodiff, linsolve = LS.CholeskyFactorization())
+    ps = MyParams(rand(), hermitianpart(rand(2, 2) + 2I))
+    prob = NonlinearLeastSquaresProblem{false}(f, rand(2), ps)
+    sol = solve(prob, alg)
+    return sol
+end
+end
diff --git a/test/trim/optimization_once_per_process.jl b/test/trim/optimization_once_per_process.jl
new file mode 100644
index 000000000..706bfbfa4
--- /dev/null
+++ b/test/trim/optimization_once_per_process.jl
@@ -0,0 +1,45 @@
+module TestModuleTrimmable
+using NonlinearSolveFirstOrder
+using DiffEqBase
+using ADTypes: AutoForwardDiff
+using ForwardDiff
+using LinearAlgebra
+using StaticArrays
+using LinearSolve
+import SciMLBase
+const LS = LinearSolve
+
+function f(u, p)
+    L, U = cholesky(p.Σ)
+    rhs = (u .* u .- p.λ)
+    # there are some issues currently with LinearSolve and triangular matrices,
+    # so we just make `L` dense here.
+    linprob = LinearProblem(Matrix(L), rhs)
+    alg = LS.GenericLUFactorization()
+    sol = LinearSolve.solve(linprob, alg)
+    return sol.u
+end
+
+struct MyParams{T, M}
+    λ::T
+    Σ::M
+end
+
+const cache = OncePerProcess() do
+    autodiff = AutoForwardDiff(; chunksize = 1)
+    alg = TrustRegion(; autodiff, linsolve = LS.CholeskyFactorization())
+    prob = NonlinearLeastSquaresProblem{false}(
+        f,
+        rand(2),
+        MyParams(rand(), hermitianpart(rand(2, 2) + 2I))
+    )
+    init(prob, alg)
+end
+
+function minimize(x)
+    ps = MyParams(x, hermitianpart(rand(2, 2) + 2I))
+    reinit!(cache(), rand(2); p = ps)
+    solve!(cache())
+    return cache()
+end
+end
diff --git a/test/trim/optimization_trimmable.jl b/test/trim/optimization_trimmable.jl
new file mode 100644
index 000000000..e1ee111d7
--- /dev/null
+++ b/test/trim/optimization_trimmable.jl
@@ -0,0 +1,43 @@
+module TestModuleTrimmable
+using NonlinearSolveFirstOrder
+using DiffEqBase
+using ADTypes: AutoForwardDiff
+using ForwardDiff
+using LinearAlgebra
+using StaticArrays
+using LinearSolve
+import SciMLBase
+const LS = LinearSolve
+
+function f(u, p)
+    L, U = cholesky(p.Σ)
+    rhs = (u .* u .- p.λ)
+    # there are some issues currently with LinearSolve and triangular matrices,
+    # so we just make `L` dense here.
+    linprob = LinearProblem(Matrix(L), rhs)
+    alg = LS.GenericLUFactorization()
+    sol = LinearSolve.solve(linprob, alg)
+    return sol.u
+end
+
+struct MyParams{T, M}
+    λ::T
+    Σ::M
+end
+
+const autodiff = AutoForwardDiff(; chunksize = 1)
+const alg = TrustRegion(; autodiff, linsolve = LS.CholeskyFactorization())
+const prob = NonlinearLeastSquaresProblem{false}(
+    f,
+    rand(2),
+    MyParams(rand(), hermitianpart(rand(2, 2) + 2I))
+)
+const cache = init(prob, alg)
+
+function minimize(x)
+    ps = MyParams(x, hermitianpart(rand(2, 2) + 2I))
+    reinit!(cache, rand(2); p = ps)
+    solve!(cache)
+    return cache
+end
+end
diff --git a/test/trim/runtests.jl b/test/trim/runtests.jl
new file mode 100644
index 000000000..5af443581
--- /dev/null
+++ b/test/trim/runtests.jl
@@ -0,0 +1,78 @@
+using SafeTestsets
+
+@safetestset "Clean implementation (non-trimmable)" begin
+    using JET
+    using SciMLBase: successful_retcode
+    include("optimization_clean.jl")
+    @test successful_retcode(TestModuleClean.minimize(1.0).retcode)
+    # can't use `@test_opt` macro here because it would try to eval before
+    # `using JET` is processed
+    test_opt(TestModuleClean.minimize, (typeof(1.0),))
+end
+
+@safetestset "Trimmable implementation" begin
+    using JET
+    using SciMLBase: successful_retcode
+    include("optimization_trimmable.jl")
+    @test successful_retcode(TestModuleTrimmable.minimize(1.0).retcode)
+    # can't use `@test_opt` macro here because it would try to eval before
+    # `using JET` is processed
+    test_opt(TestModuleTrimmable.minimize, (typeof(1.0),))
+end
+
+@safetestset "Run trim" begin
+    # https://discourse.julialang.org/t/capture-stdout-and-stderr-in-case-a-command-fails/101772/3?u=romeov
+    """
+    Run a Cmd object, returning the stdout & stderr contents plus the exit code
+    """
+    function _execute(cmd::Cmd)
+        out = Pipe()
+        err = Pipe()
+        process = run(pipeline(ignorestatus(cmd); stdout = out, stderr = err))
+        close(out.in)
+        close(err.in)
+        out = (
+            stdout = String(read(out)), stderr = String(read(err)),
+            exitcode = process.exitcode
+        )
+        return out
+    end
+
+    JULIAC = normpath(
+        joinpath(
+            Sys.BINDIR, Base.DATAROOTDIR, "julia", "juliac",
+            "juliac.jl"
+        )
+    )
+    @test isfile(JULIAC)
+
+    for (mainfile, expectedtopass) in [
+        ("main_trimmable.jl", true),
+        #= The test below should verify that we indeed can't get a trimmed binary
+        # for the "clean" implementation; it would flag (as an unexpected pass) if
+        # trimming ever starts working for it. Unfortunately, right now it hangs
+        # indefinitely, so we are commenting it out. =#
+        # ("main_clean.jl", false),
+        ("main_segfault.jl", false),
+    ]
+        binpath = tempname()
+        cmd = `$(Base.julia_cmd()) --project=. --depwarn=error $(JULIAC) --experimental --trim=unsafe-warn --output-exe $(binpath) $(mainfile)`
+
+        # since we are calling Julia from Julia, we first need to clean some
+        # environment variables
+        clean_env = copy(ENV)
+        delete!(clean_env, "JULIA_PROJECT")
+        delete!(clean_env, "JULIA_LOAD_PATH")
+        # We could just check for success, but then failures are hard to debug.
+        # Instead we use `_execute` to also capture `stdout` and `stderr`.
+        # @test success(setenv(cmd, clean_env))
+        trimcall = _execute(setenv(cmd, clean_env; dir = @__DIR__))
+        if trimcall.exitcode != 0 && expectedtopass
+            @show trimcall.stdout
+            @show trimcall.stderr
+        end
+        @test trimcall.exitcode == 0 broken = !expectedtopass
+        @test isfile(binpath) broken = !expectedtopass
+        @test success(`$(binpath) 1.0`) broken = !expectedtopass
+    end
+end
diff --git a/test/trim/src/TrimTest.jl b/test/trim/src/TrimTest.jl
new file mode 100644
index 000000000..85d0f78fe
--- /dev/null
+++ b/test/trim/src/TrimTest.jl
@@ -0,0 +1,27 @@
+module TrimTest
+#=
+Currently, trimming only works if the target code is in a package. I.e., trying to trim
+```julia
+include("optimization_trimmable.jl")
+function (@main)(argv::Vector{String})::Cint
+    minimize(1.0)
+    return 0
+end
+```
+or even
+```julia
+module MyMod
+    include("optimization_trimmable.jl")
+end
+function (@main)(argv::Vector{String})::Cint
+    MyMod.minimize(1.0)
+    return 0
+end
+```
+segfaults `juliac`. Looking at the segfault stacktrace, it seems the culprit is
+`const cache = init(...)`. Either way, we circumvent the segfault by putting
+the code below into a package definition.
+=#
+include("../optimization_trimmable.jl")
+include("../optimization_clean.jl")
+end
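
A note on the difference between the two trimmable variants in this patch: optimization_trimmable.jl builds its solver cache at module load time (`const cache = init(prob, alg)`), which the comment in test/trim/src/TrimTest.jl identifies as the `juliac` segfault trigger when the code is not wrapped in a package, while optimization_once_per_process.jl defers that work to first use via `OncePerProcess` (added in Julia 1.12, the same release that introduced `--trim`). The sketch below is not part of the patch; it is only a minimal illustration of the `OncePerProcess` pattern, with a hypothetical `expensive_setup` standing in for the solver-cache construction.

```julia
# Illustrative sketch only -- not from the patch. Requires Julia >= 1.12.
# `expensive_setup` is a stand-in for building the NonlinearSolve solver cache.
expensive_setup() = zeros(3)

const lazy_state = Base.OncePerProcess{Vector{Float64}}() do
    # The initializer runs at most once per process, on the first call to
    # `lazy_state()`, rather than at top-level module load time the way
    # `const cache = init(prob, alg)` does in optimization_trimmable.jl.
    expensive_setup()
end

function use_state(x::Float64)
    buf = lazy_state()   # first call initializes; later calls reuse the cached value
    buf .= x
    return sum(buf)
end
```

Deferring the initialization behind a callable like this keeps it out of top-level module code, which appears to be the motivation for the once-per-process variant of the trim test.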