diff --git a/benchmark/README.md b/benchmark/README.md
index 984d77d7..5023d5e4 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -1,5 +1,10 @@
 To run benchmarks, run the following script.
 ```
 # to compare current version and master using 10 cores and CUTEst and PowerModels.jl problems,
-julia runbenchmarks.jl 10 power cutest current master
+julia runbenchmarks.jl 10 power cutest current master
+```
+
+By default, logs are deactivated. To activate them, add `verbose` to the options:
+```
+julia runbenchmarks.jl 10 power cutest current master verbose
 ```
diff --git a/benchmark/benchmark-cutest.jl b/benchmark/benchmark-cutest.jl
index d12a6ead..f5c49375 100644
--- a/benchmark/benchmark-cutest.jl
+++ b/benchmark/benchmark-cutest.jl
@@ -1,6 +1,6 @@
 include("config.jl")
 
-function get_status(code::Symbol) 
+function get_status(code::Symbol)
     if code == :first_order
         return 1
     elseif code == :acceptable
@@ -13,13 +13,13 @@ end
 @everywhere using CUTEst
 
 if SOLVER == "master"
-    @everywhere solver = nlp -> madnlp(nlp,linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6)
+    @everywhere solver = nlp -> madnlp(nlp,linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6, print_level=PRINT_LEVEL)
     @everywhere using MadNLP, MadNLPHSL
 elseif SOLVER == "current"
-    @everywhere solver = nlp -> madnlp(nlp,linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6)
+    @everywhere solver = nlp -> madnlp(nlp,linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6, print_level=PRINT_LEVEL)
     @everywhere using MadNLP, MadNLPHSL
 elseif SOLVER == "ipopt"
-    @everywhere solver = nlp -> ipopt(nlp,linear_solver="ma57",max_cpu_time=900.,tol=1e-6)
+    @everywhere solver = nlp -> ipopt(nlp,linear_solver="ma57",max_cpu_time=900.,tol=1e-6, print_level=PRINT_LEVEL)
     @everywhere using NLPModelsIpopt
 elseif SOLVER == "knitro"
     # TODO
@@ -51,7 +51,7 @@ function benchmark(solver,probs;warm_up_probs = [])
 
     println("Decoding problems")
     broadcast(decodemodel,probs)
-    
+
     println("Solving problems")
     retvals = pmap(prob->evalmodel(prob,solver),probs)
     time = [retval.elapsed_time for retval in retvals]
@@ -63,7 +63,7 @@ exclude = [
     "PFIT1","PFIT2","PFIT4","DENSCHNE","SPECANNE","DJTL", "EG3","OET7",
     "PRIMAL3","TAX213322","TAXR213322","TAX53322","TAXR53322","HIMMELP2","MOSARQP2","LUKVLE11",
     "CYCLOOCT","CYCLOOCF","LIPPERT1","GAUSSELM","A2NSSSSL",
-    "YATP1LS","YATP2LS","YATP1CLS","YATP2CLS","BA-L52LS","BA-L73LS","BA-L21LS","CRESC132" 
+    "YATP1LS","YATP2LS","YATP1CLS","YATP2CLS","BA-L52LS","BA-L73LS","BA-L21LS","CRESC132"
 ]
 
 
@@ -76,4 +76,4 @@ time,status = benchmark(solver,probs;warm_up_probs = ["EIGMINA"])
 
 writedlm("name-cutest.csv",probs,',')
 writedlm("time-cutest-$(SOLVER).csv",time,',')
-writedlm("status-cutest-$(SOLVER).csv",status),','
+writedlm("status-cutest-$(SOLVER).csv",status,',')
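Side note on the CUTEst runs above (not part of the patch): each run leaves behind `name-cutest.csv`, `time-cutest-$(SOLVER).csv`, and `status-cutest-$(SOLVER).csv`, written with `writedlm` and a comma delimiter. Below is a minimal sketch of how the timing files could be read back for a quick current-vs-master comparison; the file names follow the `writedlm` calls above, while the loop itself and the 1.1x slowdown threshold are illustrative assumptions.
```
using DelimitedFiles

# Read the problem names and the per-solver timings written by benchmark-cutest.jl.
names     = vec(readdlm("name-cutest.csv", ',', String))
t_current = vec(readdlm("time-cutest-current.csv", ',', Float64))
t_master  = vec(readdlm("time-cutest-master.csv", ',', Float64))

# Flag problems where the current branch is noticeably slower than master.
for (name, tc, tm) in zip(names, t_current, t_master)
    if tc > 1.1 * tm
        println("$name: current $(round(tc, digits=2))s vs master $(round(tm, digits=2))s")
    end
end
```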
diff --git a/benchmark/benchmark-power.jl b/benchmark/benchmark-power.jl
index 0c1d7dee..cbd15161 100644
--- a/benchmark/benchmark-power.jl
+++ b/benchmark/benchmark-power.jl
@@ -1,9 +1,16 @@
-const PGLIB_PATH = ENV["PGLIB_PATH"]
+if haskey(ENV, "PGLIB_PATH")
+    const PGLIB_PATH = ENV["PGLIB_PATH"]
+else
+    error("Unable to find the path to the PGLIB benchmark.\n"*
+          "Please set the environment variable `PGLIB_PATH` to run the PowerModels.jl benchmark.")
+end
 
 include("config.jl")
-using MathOptInterface; const MOI=MathOptInterface
-function get_status(code::MOI.TerminationStatusCode) 
+using MathOptInterface
+const MOI = MathOptInterface
+
+function get_status(code::MOI.TerminationStatusCode)
     if code == MOI.LOCALLY_SOLVED
         return 1
     elseif code == MOI.ALMOST_OPTIMAL
         return 2
@@ -19,17 +26,17 @@ end
 
 if SOLVER == "master"
     @everywhere solver = prob -> run_opf(joinpath(PGLIB_PATH,prob[1]), prob[2],
-                                         ()->MadNLP.Optimizer(linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6))
+                                         ()->MadNLP.Optimizer(linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6, print_level=PRINT_LEVEL))
     @everywhere using MadNLP, MadNLPHSL
 
 elseif SOLVER == "current"
     @everywhere solver = prob -> run_opf(joinpath(PGLIB_PATH,prob[1]), prob[2],
-                                         ()->MadNLP.Optimizer(linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6))
+                                         ()->MadNLP.Optimizer(linear_solver=MadNLPMa57,max_wall_time=900.,tol=1e-6, print_level=PRINT_LEVEL))
     @everywhere using MadNLP, MadNLPHSL
 
 elseif SOLVER == "ipopt"
     @everywhere solver = prob -> run_opf(joinpath(PGLIB_PATH,prob[1]), prob[2],
-                                         ()->Ipopt.Optimizer(linear_solver="ma57",max_cpu_time=900.,tol=1e-6))
+                                         ()->Ipopt.Optimizer(linear_solver="ma57",max_cpu_time=900.,tol=1e-6, print_level=PRINT_LEVEL))
     @everywhere using Ipopt
 elseif SOLVER == "knitro"
     # TODO
@@ -53,7 +60,7 @@ function benchmark(solver,probs;warm_up_probs = [])
     rs = [remotecall.(solver,i,warm_up_probs) for i in procs() if i!= 1]
     ws = [wait.(r) for r in rs]
     fs= [fetch.(r) for r in rs]
-    
+
     println("Solving problems")
     retvals = pmap(prob->evalmodel(prob,solver),probs)
     time = [retval["solve_time"] for retval in retvals]
@@ -73,4 +80,4 @@ time,status = benchmark(solver,probs;warm_up_probs = [("pglib_opf_case1888_rte.m
 
 writedlm("name-power.csv",name,',')
 writedlm("time-power-$(SOLVER).csv",time)
-writedlm("status-power-$(SOLVER).csv",status)
+writedlm("status-power-$(SOLVER).csv",status)
diff --git a/benchmark/config.jl b/benchmark/config.jl
index 3f9f7203..f68a6c70 100644
--- a/benchmark/config.jl
+++ b/benchmark/config.jl
@@ -3,6 +3,7 @@ using Pkg, Distributed, DelimitedFiles
 const NP = ARGS[1]
 const SOLVER = ARGS[2]
 
+
 addprocs(parse(Int,NP),exeflags="--project=.")
 Pkg.instantiate()
 
@@ -19,3 +20,13 @@ elseif SOLVER == "knitro"
 else
     error("Proper ARGS should be given")
 end
+
+# Set verbose option
+if SOLVER == "ipopt"
+    const PRINT_LEVEL = (ARGS[3] == "verbose") ? 5 : 0
+elseif SOLVER == "knitro"
+    const PRINT_LEVEL = (ARGS[3] == "verbose") ? 3 : 0
+else
+    using MadNLP
+    const PRINT_LEVEL = (ARGS[3] == "verbose") ? MadNLP.INFO : MadNLP.ERROR
+end
diff --git a/benchmark/runbenchmarks.jl b/benchmark/runbenchmarks.jl
index fd31c1b9..a6379185 100644
--- a/benchmark/runbenchmarks.jl
+++ b/benchmark/runbenchmarks.jl
@@ -2,13 +2,19 @@ const NP = ARGS[1]
 
 const CLASSES = filter(e-> e in ["cutest","power"], ARGS)
 const SOLVERS = filter(e-> e in ["current","master","ipopt","knitro"], ARGS)
+const PROJECT_PATH = dirname(@__FILE__)
+const VERBOSE = ("verbose" in ARGS) ? "verbose" : "none"
 
-cd(@__DIR__)
-cp(".Project.toml","Project.toml",force=true)
+cp(
+    joinpath(PROJECT_PATH, ".Project.toml"),
+    joinpath(PROJECT_PATH, "Project.toml"),
+    force=true
+)
 
 for class in CLASSES
     for solver in SOLVERS
-        run(`julia --project=. benchmark-$class.jl $NP $solver`)
+        launch_script = joinpath(PROJECT_PATH, "benchmark-$class.jl")
+        run(`julia --project=$PROJECT_PATH $launch_script $NP $solver $VERBOSE`)
     end
 end
 
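The `verbose` handling added in config.jl indexes `ARGS[3]` directly, which is fine when the scripts are launched through runbenchmarks.jl, since that script always appends either `verbose` or `none`. For a standalone invocation of a single benchmark script, a guarded variant of the same mapping could look like the sketch below; the function name is illustrative only, and `MadNLP.INFO`/`MadNLP.ERROR` are the log levels used for `print_level` in the patch above.
```
using MadNLP

# Map the solver name and the (possibly absent) third CLI argument to a print level,
# mirroring the branch added to config.jl but tolerating a missing argument.
function select_print_level(solver::String, args::Vector{String})
    verbose = get(args, 3, "none") == "verbose"
    if solver == "ipopt"
        return verbose ? 5 : 0      # Ipopt expects an integer print_level
    elseif solver == "knitro"
        return verbose ? 3 : 0      # KNITRO support is still marked TODO in these scripts
    else
        return verbose ? MadNLP.INFO : MadNLP.ERROR
    end
end

# e.g. select_print_level("current", ["10", "current"]) returns MadNLP.ERROR
```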