diff --git a/.travis.yml b/.travis.yml index 641a0acc..2acb375f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,9 +6,6 @@ os: #- windows julia: - 1.1 #Long-term support (LTS) release: v1.0.5 (Sep 9, 2019) - #- 1.2 - #- 1.3 - #- 1.4 - 1.5 #- nightly notifications: diff --git a/Project.toml b/Project.toml index bc0c522c..4536b83e 100644 --- a/Project.toml +++ b/Project.toml @@ -1,9 +1,8 @@ name = "Stopping" uuid = "c4fe5a9e-e7fb-5c3d-89d5-7f405ab2214f" -authors = ["Jean-Pierre Dussault ","Tangi Migot ","Sam Goyette "] +authors = ["Jean-Pierre Dussault ", "Tangi Migot ", "Sam Goyette "] version = "0.2.5" - [deps] DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -14,7 +13,6 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] -#NLPModels = "0.10.0, 1" julia = "^1.0.0" [extras] diff --git a/docs/make.jl b/docs/make.jl index 39d04dfd..16bf74c4 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,6 +8,16 @@ makedocs( pages = [ "Home" => "index.md", "API" => "api.md", + "Stopping's ID" => "idcard.md", + "State's ID" => "idcard-state.md", + "Meta's ID" => "idcard-stoppingmeta.md", + "Optimality in Stopping" => "howstopcheckoptimality.md", + "Stopping in action" => "example-basic-Newton.md", + "Stop remote control" => "idcard-stopremote.md", + "Stopping workflow" => "stop-workflow.md", + "NLPStopping" => "nlpstopping.md", + "LAStopping" => "lastopping.md", + "Readme" => "index_tuto.md", "Examples and tutorials" => "tutorial.md", "How to State" => "howtostate.md", "How to State for NLPs" => "howtostate-nlp.md", @@ -30,5 +40,4 @@ makedocs( # See "Hosting Documentation" and deploydocs() in the Documenter manual # for more information. deploydocs(repo = "github.com/vepiteski/Stopping.jl") -#deploydocs(repo = "github.com/Goysa2/Stopping.jl")# #https://juliadocs.github.io/Documenter.jl/stable/man/hosting/ ? diff --git a/docs/src/example-basic-Newton.md b/docs/src/example-basic-Newton.md new file mode 100644 index 00000000..113533e0 --- /dev/null +++ b/docs/src/example-basic-Newton.md @@ -0,0 +1,39 @@ +## Example I: Stopping in the flow + +We present here a typical iterative algorithm to illustrate how to use Stopping. + +```julia +function rand_solver(stp :: AbstractStopping, x0 :: AbstractVector) + + x = x0 + #First, call start! to check optimality and set an initial configuration + OK = update_and_start!(stp, x = x) + + while !OK + #Run some computations and update the iterate + d = rand(length(x)) + x += d + + #Update the State and call the Stopping with stop! + OK = update_and_stop!(stp, x = x, d = d) + end + + return stp +end +``` +This example shows the most basic features of Stopping. It does many checks for you. In this innocent-looking algorithm, the call to `update_and_start!` and `update_and_stop!` will verifies unboundedness of `x`, the time spent in the algorithm, the number of iterations (= number of call to `stop!`), and the domain of `x` (in case some of its components become `NaN` for instance). + +### FAQ: How can I remove some checks done by Stopping? +The native instances of `AbstractStopping` available in Stopping.jl all contain an attribute `stop_remote`. +This is a remote control for Stopping's checks. 
+```julia +typeof(stp.stop_remote) <: StopRemoteControl #stop_remote is an instance of StopRemoteControl +``` +This attributes contains boolean values for each check done by Stopping, see +```julia +fieldnames(stp.stop_remote) #get all the attributes of the remote control +``` +For instance, we can remove the unboundedness and domain check done on `x` by setting: +```julia +stp.stop_remote = StopRemoteControl(unbounded_and_domain_x_check = false) +``` diff --git a/docs/src/howstopcheckoptimality.md b/docs/src/howstopcheckoptimality.md new file mode 100644 index 00000000..02493a17 --- /dev/null +++ b/docs/src/howstopcheckoptimality.md @@ -0,0 +1,37 @@ +## How Stopping checks for optimality + +The solver can let Stopping handles the optimality checks. We see here how it works and how to tune it in. + +First, the function `stop!` computes a **score** using `optimality_check` function given in the `meta`. The keywords argument given in `stop!` are passed to this function. +```julia +#Compute the score if !src.optimality_check +score = stp.meta.optimality_check(stp.pb, stp.current_state; kwargs...)) +``` +The **score** is then stored in `stp.current_state.current_score`. If the **score** doesn't contain any NaN, Stopping proceeds and test whether it is within tolerances given as functions in `meta.tol_check` and `meta.tol_check_neg`. +```julia +#Compute the tolerances +check_pos, check_neg = tol_check(stp.meta) +#Test the score vs the tolerances +optimal = _inequality_check(optimality, check_pos, check_neg) +``` +So, overall Stopping does: +```julia +check_pos = stp.meta.tol_check(stp.meta.atol, stp.meta.rtol, stp.meta.optimality0) +check_neg = stp.meta.tol_check_neg(stp.meta.atol, stp.meta.rtol, stp.meta.optimality0) +score = stp.meta.optimality_check(stp.pb, stp.current_state) +check_pos ≤ score ≤ check_neg +``` + +### FAQ: Does it work for vector scores as well? + +The type of the score and tolerances are respectively initialized in the State and the Meta at the initialization of the Stopping. Hence one can use vectorized scores as long as they can be compared with the tolerances. For instance: +- The score is a vector and tolerances are vectors of the same length or numbers. +- The score is a tuple and tolerances are tuple or a number. + +### FAQ: How do I implement AND and OR conditions? +The concatenation of two scores (AND condition) that need to be tested to zero can be represented as a vector. +The disjunction of two score (OR condition) are represented as tuple. + +### FAQ: Do Stopping really computes the tolerances each time? + +It does unless `meta.retol` is set as `true`. This entry can be set as true from the beginning as the `tol_check` functions are evaluated once at the initialization of the `meta`. diff --git a/docs/src/idcard-state.md b/docs/src/idcard-state.md new file mode 100644 index 00000000..bb04ba7c --- /dev/null +++ b/docs/src/idcard-state.md @@ -0,0 +1,92 @@ +## State + +As discussed before each Stopping contains a `current_state :: AbstractState` attribute containing the current information/state of the problem. When running the iterative loop, the `State` is updated and the `Stopping` make a decision based on this information. + +The `current_state` contains all the information relative to a problem. We implemented three instances as an illustration: +- `GenericState` ; +- `NLPAtX` representing the state of an `NLPModel`; +- `OneDAtX` for 1D optimization problems. 
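+
+As a minimal sketch, each of these States is built from a current iterate (here `n` stands for an arbitrary problem dimension; `OneDAtX` plays the same role for 1D problems). Both constructors below also appear in the examples later in this documentation:
+```julia
+using Stopping
+
+n = 5
+state_gen = GenericState(zeros(n)) # generic container: only the iterate is mandatory
+state_nlp = NLPAtX(zeros(n))       # NLPModels-oriented state: the remaining fields are filled later
+```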
+ +`GenericState` is an illustration of the behavior of such object that minimally contains: +- `x` the current iterate; +- `d` the current direction; +- `res` the current residual; +- `current_time` the current time; +- `current_score` the current optimality score. + +By convention, `x` and `current_score` are mandatory information, and the other attribute are initialized with keywords arguments: +```julia +GenericState(zeros(n), 0.0, d = zeros(n), current_time = NaN) +``` +the alternative would be +```julia +GenericState(zeros(n), d = zeros(n), current_time = NaN) +``` + +Beyond the use inside Stopping, returning the State also provides the user the opportunity to use some of the information computed by the algorithm. + +### FAQ: Are there Type constraints when initializing a State? + +Yes, an AbstractState{S,T} is actually a paramtric type where `S` is the type of the `current_score` and `T` is the type of `x`. +```julia +x0, score0 = rand(n), Array{Float64,1}(undef, n) +GenericState(x0, score0) #is an AbstractState{Array{Float64,1}, Array{Float64,1}} +``` +By default, the `current_score` is a real number, hence +```julia +x0 = rand(n) +GenericState(x0) #is an AbstractState{Float64, Array{Float64,1}} +``` +These types can be obtained with the functions `xtype` and `scoretype`: +```julia +scoretype(stp.current_state) +xtype(stp.current_state) +``` + +### FAQ: Can I design a tailored State for my problem? + +`NLPAtX` is an illustration of a more evolved instance associated to `NLPModels` for nonlinear optimization models. It contains: +```julia +mutable struct NLPAtX{S, T <: AbstractVector, MT <: AbstractMatrix} <: AbstractState{S, T} +#Unconstrained State + x :: T # current point + fx :: eltype(T) # objective function + gx :: T # gradient size: x + Hx :: MT # hessian size: |x| x |x| +#Bounds State + mu :: T # Lagrange multipliers with bounds size of |x| +#Constrained State + cx :: T # vector of constraints lc <= c(x) <= uc + Jx :: MT # jacobian matrix, size: |lambda| x |x| + lambda :: T # Lagrange multipliers + + d :: T #search direction + res :: T #residual + #Resources State + current_time :: Float64 + current_score :: S + evals :: Counters + + function NLPAtX(x :: T, + lambda :: T, + current_score :: S; + fx :: eltype(T) = _init_field(eltype(T)), + gx :: T = _init_field(T), + Hx :: AbstractMatrix = _init_field(Matrix{eltype(T)}), + mu :: T = _init_field(T), + cx :: T = _init_field(T), + Jx :: AbstractMatrix = _init_field(Matrix{eltype(T)}), + d :: T = _init_field(T), + res :: T = _init_field(T), + current_time :: Float64 = NaN, + evals :: Counters = Counters() + ) where {S, T <: AbstractVector} + + _size_check(x, lambda, fx, gx, Hx, mu, cx, Jx) + + return new{S, T, Matrix{eltype(T)}}(x, fx, gx, Hx, mu, cx, Jx, lambda, d, + res, current_time, current_score, evals) + end +end +``` +`_init_field(T)` initializes a value for a given type guaranteing type stability and minimal storage. diff --git a/docs/src/idcard-stoppingmeta.md b/docs/src/idcard-stoppingmeta.md new file mode 100644 index 00000000..4e9539f9 --- /dev/null +++ b/docs/src/idcard-stoppingmeta.md @@ -0,0 +1,52 @@ +## Stopping's attributes ID: StoppingMeta + +Usual instances of `AbstractStopping` contains a `StoppingMeta <: <: AbstractStoppingMeta` (`stp.meta`), which controls the various tolerances and thresholds used by the functions `start!` and `stop!`. 
+- `atol :: Number = 1.0e-6` +- `rtol :: Number = 1.0e-15` +- `optimality0 :: Number = 1.0` +- `tol_check :: Function = (atol :: Number, rtol :: Number, opt0 :: Number) -> max(atol,rtol*opt0)` +- `tol_check_neg :: Function = (atol :: Number, rtol :: Number, opt0 :: Number) -> - tol_check(atol,rtol,opt0)` +- `optimality_check :: Function = (a,b) -> Inf` +- `retol :: Bool = true` +- `unbounded_threshold :: Number = 1.0e50, #typemax(Float64)` +- `unbounded_x :: Number = 1.0e50` +- `max_f :: Int = typemax(Int)` +- `max_cntrs :: Dict{Symbol,Int} = Dict{Symbol,Int64}()` +- `max_eval :: Int = 20000` +- `max_iter :: Int = 5000` +- `max_time :: Float64 = 300.0` +- `start_time :: Float64 = NaN` +- `meta_user_struct :: Any = nothing` +- `user_check_func! :: Function = (stp :: AbstractStopping, start :: Bool) -> nothing` +The default constructor for the meta uses above values, and they can all be modified using keywords +```julia +meta = StoppingMeta(rtol = 0.0) #will set `rtol` as 0.0. +``` + +`StoppingMeta` also contains the various status related to the checks: +```julia +OK_check(meta) #returns true if one of the check is true. +``` + +### FAQ: Are there Type constraints when initializing a StoppingMeta? + +An `StoppingMeta{TolType, CheckType, MUS, IntType}` is actually a paramtric type: +```julia +checktype(meta) #CheckType: return type of `tol_check` and `tol_check_neg` +toltype(meta) #TolType: type of the tolerances +metausertype(meta) #MUS: type of the user-defined structure +inttype(meta) #IntType: type of integer tolerances +``` + +### FAQ: What is `user_check_func!`? + +This is a callback function called in the execution of the function `stop!` or `start!`. This function takes two input `stp <: AbstractStopping` and a boolean set as `true` if called from `start!` and `false` if called from `stop!`. To eventually returns a stopping status, the function has to update `stp.meta.stopbyuser`. + +For instance, if one want to stop when $$\log(x) < 1$$ in `stop!`: +```julia +function test(stp, start) + stp.meta.stopbyuser = !start && (log(stp.current_state.x) < 1) +end +user_check_func! = test +``` +The exclamation mark (!) is a naming convention used when the function modifies input. diff --git a/docs/src/idcard-stopremote.md b/docs/src/idcard-stopremote.md new file mode 100644 index 00000000..849429cb --- /dev/null +++ b/docs/src/idcard-stopremote.md @@ -0,0 +1,37 @@ +## Stopping's attributes ID: StopRemoteControl + +Usual instances of `AbstractStopping` contains a `StopRemoteControl <: AbstractStopRemoteControl` (`stp.stop_remote`), which controls the various checks run by the functions `start!` and `stop!`. An instance of `StopRemoteControl` contains: +- `unbounded_and_domain_x_check :: Bool` +- `domain_check :: Bool` +- `optimality_check :: Bool` +- `infeasibility_check :: Bool` +- `unbounded_problem_check :: Bool` +- `tired_check :: Bool` +- `resources_check :: Bool` +- `stalled_check :: Bool` +- `iteration_check :: Bool` +- `main_pb_check :: Bool` +- `user_check :: Bool` +- `user_start_check :: Bool` +- `cheap_check :: Bool` +Only the last attributes, `cheap_check`, is not related with a specific check. Set as `true`, it stopped whenever one of the checks is successful and the algorithm needs to stop. It is `false` by default. All the other entries are set as `true` by default, i.e. +```julia +#initializes a remote control with all the checks on. +src = StopRemoteControl() +``` +In order to remove some checks, it suffices to use keywords: +```julia +#remove time and iteration checks. 
+src = StopRemoteControl(tired_check = false, iteration_check = false) +``` + +### FAQ: Is there performance issues with all these checks? +Assuming that `x` is a vector of length `n`, some of these checks are indeed in O(n), which can be undesirable for some applications. In this case, you can either initialize a "cheap" remote control as follows +```julia +#initialize a StopRemoteControl with 0(n) checks set as false +src = cheap_stop_remote_control() +``` +or deactivate the tests by hand as shown previously. + +### FAQ: How can I fine-tune these checks? +All these checks can be fine-tuned by selecting entries in the `StoppingMeta`. diff --git a/docs/src/idcard.md b/docs/src/idcard.md new file mode 100644 index 00000000..f7006c9c --- /dev/null +++ b/docs/src/idcard.md @@ -0,0 +1,39 @@ +## Stopping + +A Stopping is an instance (a subtype) of an `AbstractStopping`. Such instances minimally contain: +- `problem :: Any` an arbitrary instance of a problem; +- `meta :: AbstractStoppingMeta` contains the used parameters and stopping statuses; +- `current_state :: AbstractState` current information/state of the problem. + +While the `problem` is up to the user, the `meta` and the `current_state` are specific features of Stopping.jl. +The `meta` contains all the parameters relative to the stopping criteria (tolerances, limits ...). We implemented +`StoppingMeta()` which offers a set of default parameters that can be easily modified with keyword arguments. See [StoppingMeta](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/StoppingMetamod.jl) for more detailed information. The native instances of `AbstractStopping` (`GenericStopping`, `NLPStoppping`, etc) contains more attributes (`stop_remote`, `main_stp`, `listofstates`, `stopping_user_struct`) that we will developed later on. + +The `current_state` contains all the information relative to a problem. We implemented a `GenericState` as an +illustration of the behavior of such object that typically contains: +- `x` the current iterate; +- `d` the current direction; +- `res` the current residual; +- `current_time` the current time; +- `current_score` the current optimality score; +- ... other information relative to the problems. + +When running the iterative loop, the `State` is updated and the `Stopping` make a decision based on this information. + +## Main Methods + +Stopping's main behavior is represented by two functions: +* `start!(:: AbstractStopping)` initializes the time and the tolerance at the starting point and stopping criteria. +* `stop!(:: AbstractStopping)` checks stopping criteria + +Stopping uses the information furnished by the State to make a decision. Communication between the two can be done through the following functions: +* `update_and_start!(stp :: AbstractStopping; kwargs...)` updates the states with information furnished as kwargs and then call start!. +* `update_and_stop!(stp :: AbstractStopping; kwargs...)` updates the states with information furnished as kwargs and then call stop!. +* `fill_in!(stp :: AbstractStopping, x :: xtype(stp.current_state))` a function that fills in all the State with all the information required to evaluate the stopping functions correctly. This can reveal useful, for instance, if the user do not trust the information furnished by the algorithm in the State. +* `reinit!(stp :: AbstractStopping)` reinitialize the entries of the Stopping to reuse for another call. + +### FAQ: How do I get more information? +As usual in Julia, we can use `?` to get functions' documentation. 
+```julia +? Stopping.stop! +``` diff --git a/docs/src/index.md b/docs/src/index.md index 870346b4..a2b411dd 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,63 +1,22 @@ # Stopping.jl -Documentation for Stopping.jl +This package provides general tools for the uniformization of stopping criteria for iterative solvers. +When calling an iterative solver, four outcomes may happen: -## Purpose +1. An approximate solution is obtained; +2. The problem is declared unsolvable (unboundedness, infeasibility, etc); +3. The maximum available resources are not sufficient to compute the solution; +4. An algorithm's dependent failure happens. -Tools to ease the uniformization of stopping criteria in iterative solvers. +There are many advantages in using Stopping: +- Make your code more readable by outsourcing some tests to Stopping; +- Let the user a hand on the stopping criteria; +- Encourage reusability of codes. -When a solver is called on an optimization model, four outcomes may happen: - -1. the approximate solution is obtained, the problem is considered solved -2. the problem is declared unsolvable (unboundedness, infeasibility ...) -3. the maximum available resources are not sufficient to compute the solution -4. some algorithm dependent failure happens - -This tool eases the first three items above. It defines a type - - mutable struct GenericStopping <: AbstractStopping - problem :: Any # an arbitrary instance of a problem - meta :: AbstractStoppingMeta # contains the used parameters and stopping status - current_state :: AbstractState # Current information on the problem - main_stp :: Union{AbstractStopping, Nothing} # Stopping of the main problem, or nothing - listofstates :: Union{ListStates, Nothing} # History of states - user_specific_struct :: Any # User-specific structure - -The [StoppingMeta](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/StoppingMetamod.jl) provides default tolerances, maximum resources, ... as well as (boolean) information on the result. - -### Your Stopping your way - -The GenericStopping (with GenericState) provides a complete structure to handle stopping criteria. -Then, depending on the problem structure, you can specialize a new Stopping by -redefining a State and some functions specific to your problem. - -We provide some specialization of the GenericStopping for optimization: - * [NLPStopping](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/NLPStoppingmod.jl) with [NLPAtX](https://github.com/vepiteski/Stopping.jl/blob/master/src/State/NLPAtXmod.jl) as a specialized State: for non-linear programming (based on [NLPModels](https://github.com/JuliaSmoothOptimizers/NLPModels.jl)); - * [LAStopping](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/LinearAlgebraStopping.jl) with [GenericState](https://github.com/vepiteski/Stopping.jl/blob/master/src/State/GenericStatemod.jl): for linear algebra problems. - * [LS_Stopping](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/LineSearchStoppingmod.jl) with [LSAtT](https://github.com/vepiteski/Stopping.jl/blob/master/src/State/LSAtTmod.jl) as a specialized State: for 1d optimization; - * more to come... - -## Functions - -The tool provides two main functions: -* `start!(stp :: AbstractStopping)` initializes the time and the tolerance at the starting point and check wether the initial guess is optimal. 
-* `stop!(stp :: AbstractStopping)` checks optimality of the current guess as well as failure of the system (unboundedness for instance) and maximum resources (number of evaluations of functions, elapsed time ...) - -Stopping uses the informations furnished by the State to evaluate its functions. Communication between the two can be done through the following functions: -* `update_and_start!(stp :: AbstractStopping; kwargs...)` updates the states with informations furnished as kwargs and then call start!. -* `update_and_stop!(stp :: AbstractStopping; kwargs...)` updates the states with informations furnished as kwargs and then call stop!. -* `fill_in!(stp :: AbstractStopping, x :: Iterate)` a function that fill in all the State with all the informations required to correctly evaluate the stopping functions. This can reveal useful, for instance, if the user do not trust the informations furnished by the algorithm in the State. -* `reinit!(stp :: AbstractStopping)` reinitialize the entries of -the Stopping to reuse for another call. - -Consult the [HowTo tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/runhowto.jl) to learn more about the possibilities offered by Stopping. - -You can also access other examples of algorithms in the [test/examples](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/) folder, which for instance illustrate the strenght of Stopping with subproblems: -* Consult the [OptimSolver tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/run-optimsolver.jl) for more on how to use Stopping with nested algorithms. -* Check the [Benchmark tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/benchmark.jl) to see how Stopping can combined with [SolverBenchmark.jl](https://juliasmoothoptimizers.github.io/SolverBenchmark.jl/). -* Stopping can be adapted to closed solvers via a buffer function as in [Buffer tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/buffer.jl) for an instance with [Ipopt](https://github.com/JuliaOpt/Ipopt.jl) via [NLPModelsIpopt](https://github.com/JuliaSmoothOptimizers/NLPModelsIpopt.jl). +Stopping.jl offers several advanced facilities, but a basic usage is already beneficial for your code. ## How to install + Install and test the Stopping package with the Julia package manager: ```julia pkg> add Stopping @@ -67,80 +26,14 @@ You can access the most up-to-date version of the Stopping package using: ```julia pkg> add https://github.com/vepiteski/Stopping.jl pkg> test Stopping -pkg> status Stopping -``` -## Example - -As an example, a naive version of the Newton method is provided [here](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/newton.jl). First we import the packages: -``` -using LinearAlgebra, NLPModels, Stopping -``` -We consider a quadratic test function, and create an uncontrained quadratic optimization problem using [NLPModels](https://github.com/JuliaSmoothOptimizers/NLPModels.jl): -``` -A = rand(5, 5); Q = A' * A; -f(x) = 0.5 * x' * Q * x -nlp = ADNLPModel(f, ones(5)) -``` - -We now initialize the *NLPStopping*. First create a State. 
-``` -nlp_at_x = NLPAtX(ones(5)) -``` -We use [unconstrained_check](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/nlp_admissible_functions.jl) as an optimality function -``` -stop_nlp = NLPStopping(nlp, nlp_at_x, optimality_check = unconstrained_check) -``` -Note that, since we used a default State, an alternative would have been: -``` -stop_nlp = NLPStopping(nlp) -``` - -Now a basic version of Newton to illustrate how to use Stopping. -``` -function newton(stp :: NLPStopping) - - #Notations - pb = stp.pb; state = stp.current_state; - #Initialization - xt = state.x - - #First, call start! to check optimality and set an initial configuration - #(start the time counter, set relative error ...) - OK = update_and_start!(stp, x = xt, gx = grad(pb, xt), Hx = hess(pb, xt)) - - while !OK - #Compute the Newton direction (state.Hx only has the lower triangular) - d = (state.Hx + state.Hx' - diagm(0 => diag(state.Hx))) \ (- state.gx) - #Update the iterate - xt = xt + d - #Update the State and call the Stopping with stop! - OK = update_and_stop!(stp, x = xt, gx = grad(pb, xt), Hx = hess(pb, xt)) - end - - return stp -end -``` -Finally, we can call the algorithm with our Stopping: -``` -stop_nlp = newton(stop_nlp) -``` -and consult the Stopping to know what happened -``` -#We can then ask stop_nlp the final status -@test :Optimal in status(stop_nlp, list = true) -#Explore the final values in stop_nlp.current_state -printstyled("Final solution is $(stop_nlp.current_state.x)", color = :green) ``` -We reached optimality, and thanks to the Stopping structure this simple looking -algorithm verified at each step of the algorithm: -- time limit has been respected; -- evaluations of the problem are not excessive; -- the problem is not unbounded (w.r.t. x and f(x)); -- there is no NaN in x, f(x), g(x), H(x); -- the maximum number of iteration (call to stop!) is limited. -## Long-Term Goals +Stopping.jl most evolved facilities are based on [JuliaSmoothOptimizers' tools](juliasmoothoptimizers.github.io/). -Stopping is aimed as a tool for improving the reusability and robustness in the implementation of iterative algorithms. We warmly welcome any feedback or comment leading to potential improvements. +## Stopping.jl in action -Future work will address more sophisticated problems such as mixed-integer optimization problems, optimization with uncertainty. The list of suggested optimality functions will be enriched with state of the art conditions. +Stopping.jl is already used in other Julia packages: +- [StoppingInterface.jl](https://github.com/tmigot/StoppingInterface.jl): an interface between Stopping.jl and the outside world; +- [MPCCSolver.jl](https://github.com/tmigot/MPCCSolver.jl): solver for mathematical programs with complementarity constraints; +- [FletcherPenaltyNLPSolver](https://github.com/tmigot/FletcherPenaltyNLPSolver): solver for nonlinear optimization models with Fletcher's penalty method; +- ... diff --git a/docs/src/index_tuto.md b/docs/src/index_tuto.md new file mode 100644 index 00000000..870346b4 --- /dev/null +++ b/docs/src/index_tuto.md @@ -0,0 +1,146 @@ +# Stopping.jl + +Documentation for Stopping.jl + +## Purpose + +Tools to ease the uniformization of stopping criteria in iterative solvers. + +When a solver is called on an optimization model, four outcomes may happen: + +1. the approximate solution is obtained, the problem is considered solved +2. the problem is declared unsolvable (unboundedness, infeasibility ...) +3. 
the maximum available resources are not sufficient to compute the solution +4. some algorithm dependent failure happens + +This tool eases the first three items above. It defines a type + + mutable struct GenericStopping <: AbstractStopping + problem :: Any # an arbitrary instance of a problem + meta :: AbstractStoppingMeta # contains the used parameters and stopping status + current_state :: AbstractState # Current information on the problem + main_stp :: Union{AbstractStopping, Nothing} # Stopping of the main problem, or nothing + listofstates :: Union{ListStates, Nothing} # History of states + user_specific_struct :: Any # User-specific structure + +The [StoppingMeta](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/StoppingMetamod.jl) provides default tolerances, maximum resources, ... as well as (boolean) information on the result. + +### Your Stopping your way + +The GenericStopping (with GenericState) provides a complete structure to handle stopping criteria. +Then, depending on the problem structure, you can specialize a new Stopping by +redefining a State and some functions specific to your problem. + +We provide some specialization of the GenericStopping for optimization: + * [NLPStopping](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/NLPStoppingmod.jl) with [NLPAtX](https://github.com/vepiteski/Stopping.jl/blob/master/src/State/NLPAtXmod.jl) as a specialized State: for non-linear programming (based on [NLPModels](https://github.com/JuliaSmoothOptimizers/NLPModels.jl)); + * [LAStopping](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/LinearAlgebraStopping.jl) with [GenericState](https://github.com/vepiteski/Stopping.jl/blob/master/src/State/GenericStatemod.jl): for linear algebra problems. + * [LS_Stopping](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/LineSearchStoppingmod.jl) with [LSAtT](https://github.com/vepiteski/Stopping.jl/blob/master/src/State/LSAtTmod.jl) as a specialized State: for 1d optimization; + * more to come... + +## Functions + +The tool provides two main functions: +* `start!(stp :: AbstractStopping)` initializes the time and the tolerance at the starting point and check wether the initial guess is optimal. +* `stop!(stp :: AbstractStopping)` checks optimality of the current guess as well as failure of the system (unboundedness for instance) and maximum resources (number of evaluations of functions, elapsed time ...) + +Stopping uses the informations furnished by the State to evaluate its functions. Communication between the two can be done through the following functions: +* `update_and_start!(stp :: AbstractStopping; kwargs...)` updates the states with informations furnished as kwargs and then call start!. +* `update_and_stop!(stp :: AbstractStopping; kwargs...)` updates the states with informations furnished as kwargs and then call stop!. +* `fill_in!(stp :: AbstractStopping, x :: Iterate)` a function that fill in all the State with all the informations required to correctly evaluate the stopping functions. This can reveal useful, for instance, if the user do not trust the informations furnished by the algorithm in the State. +* `reinit!(stp :: AbstractStopping)` reinitialize the entries of +the Stopping to reuse for another call. + +Consult the [HowTo tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/runhowto.jl) to learn more about the possibilities offered by Stopping. 
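+
+Before diving into the tutorials, here is a minimal sketch of how these functions interact. The problem, the solver name and the "iteration" are placeholders chosen for illustration; `GenericStopping(pb, x; kwargs...)` forwards the keyword arguments to the meta:
+```julia
+using Stopping
+
+function toy_solver(stp)
+    OK = update_and_start!(stp, x = stp.current_state.x) # start! checks the initial guess
+    while !OK
+        x  = stp.current_state.x .- 0.1                  # placeholder iteration
+        OK = update_and_stop!(stp, x = x)                 # update the State, then run stop!
+    end
+    return stp
+end
+
+pb  = nothing # any problem instance is accepted by GenericStopping
+stp = GenericStopping(pb, ones(2), max_iter = 10, atol = 1e-6)
+toy_solver(stp)
+status(stp)   # :IterationLimit here, since the default optimality_check never succeeds
+```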
+ +You can also access other examples of algorithms in the [test/examples](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/) folder, which for instance illustrate the strenght of Stopping with subproblems: +* Consult the [OptimSolver tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/run-optimsolver.jl) for more on how to use Stopping with nested algorithms. +* Check the [Benchmark tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/benchmark.jl) to see how Stopping can combined with [SolverBenchmark.jl](https://juliasmoothoptimizers.github.io/SolverBenchmark.jl/). +* Stopping can be adapted to closed solvers via a buffer function as in [Buffer tutorial](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/buffer.jl) for an instance with [Ipopt](https://github.com/JuliaOpt/Ipopt.jl) via [NLPModelsIpopt](https://github.com/JuliaSmoothOptimizers/NLPModelsIpopt.jl). + +## How to install +Install and test the Stopping package with the Julia package manager: +```julia +pkg> add Stopping +pkg> test Stopping +``` +You can access the most up-to-date version of the Stopping package using: +```julia +pkg> add https://github.com/vepiteski/Stopping.jl +pkg> test Stopping +pkg> status Stopping +``` +## Example + +As an example, a naive version of the Newton method is provided [here](https://github.com/vepiteski/Stopping.jl/blob/master/test/examples/newton.jl). First we import the packages: +``` +using LinearAlgebra, NLPModels, Stopping +``` +We consider a quadratic test function, and create an uncontrained quadratic optimization problem using [NLPModels](https://github.com/JuliaSmoothOptimizers/NLPModels.jl): +``` +A = rand(5, 5); Q = A' * A; +f(x) = 0.5 * x' * Q * x +nlp = ADNLPModel(f, ones(5)) +``` + +We now initialize the *NLPStopping*. First create a State. +``` +nlp_at_x = NLPAtX(ones(5)) +``` +We use [unconstrained_check](https://github.com/vepiteski/Stopping.jl/blob/master/src/Stopping/nlp_admissible_functions.jl) as an optimality function +``` +stop_nlp = NLPStopping(nlp, nlp_at_x, optimality_check = unconstrained_check) +``` +Note that, since we used a default State, an alternative would have been: +``` +stop_nlp = NLPStopping(nlp) +``` + +Now a basic version of Newton to illustrate how to use Stopping. +``` +function newton(stp :: NLPStopping) + + #Notations + pb = stp.pb; state = stp.current_state; + #Initialization + xt = state.x + + #First, call start! to check optimality and set an initial configuration + #(start the time counter, set relative error ...) + OK = update_and_start!(stp, x = xt, gx = grad(pb, xt), Hx = hess(pb, xt)) + + while !OK + #Compute the Newton direction (state.Hx only has the lower triangular) + d = (state.Hx + state.Hx' - diagm(0 => diag(state.Hx))) \ (- state.gx) + #Update the iterate + xt = xt + d + #Update the State and call the Stopping with stop! 
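+    #(the keyword arguments x, gx and Hx are copied into stp.current_state before the checks run)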
+ OK = update_and_stop!(stp, x = xt, gx = grad(pb, xt), Hx = hess(pb, xt)) + end + + return stp +end +``` +Finally, we can call the algorithm with our Stopping: +``` +stop_nlp = newton(stop_nlp) +``` +and consult the Stopping to know what happened +``` +#We can then ask stop_nlp the final status +@test :Optimal in status(stop_nlp, list = true) +#Explore the final values in stop_nlp.current_state +printstyled("Final solution is $(stop_nlp.current_state.x)", color = :green) +``` +We reached optimality, and thanks to the Stopping structure this simple looking +algorithm verified at each step of the algorithm: +- time limit has been respected; +- evaluations of the problem are not excessive; +- the problem is not unbounded (w.r.t. x and f(x)); +- there is no NaN in x, f(x), g(x), H(x); +- the maximum number of iteration (call to stop!) is limited. + +## Long-Term Goals + +Stopping is aimed as a tool for improving the reusability and robustness in the implementation of iterative algorithms. We warmly welcome any feedback or comment leading to potential improvements. + +Future work will address more sophisticated problems such as mixed-integer optimization problems, optimization with uncertainty. The list of suggested optimality functions will be enriched with state of the art conditions. diff --git a/docs/src/lastopping.md b/docs/src/lastopping.md new file mode 100644 index 00000000..a7d12f47 --- /dev/null +++ b/docs/src/lastopping.md @@ -0,0 +1,32 @@ +## LinearAlgebraStopping: A Stopping for linear algebra + +The Stopping-structure can be adapted to any problem solved by iterative methods. We discuss here `LAStopping` a specialization of an `AbstractStopping` for linear systems: +$$ +Ax=b \text{ or } \min_x \frac{1}{2}\|Ax - b\|^2 +$$ +. We highlight here the specifities of such instance: +- The problem is either an `LLSModel` or `Stopping.LinearSystem`. +- These two types of problems have some access on `A`, `b` and counters of evaluations. The matrix `A` can be either given as a sparse/dense matrix or a linear operator. +- Default optimality functions are checking either the system directly or the normal equation. + +```julia +#Problem definition: +m, n = 200, 100 #size of A: m x n +A = 100 * rand(m, n) #It's a dense matrix :) +xref = 100 * rand(n) +b = A * xref +#Our initial guess +x0 = zeros(n) + +#Two definitions of LAStopping: 1) for dense matrix: +la_stop = LAStopping(A, b, GenericState(x0), + max_iter = 150000, + rtol = 1e-6, + max_cntrs = Stopping._init_max_counters_NLS(residual = 150000)) +#2) for a linear operator: +op_stop = LAStopping(LinearSystem(LinearOperator(A), b), + GenericState(x0), + max_iter = 150000, + rtol = 1e-6, + max_cntrs = Stopping._init_max_counters_linear_operators(nprod = 150000)) +``` diff --git a/docs/src/nlpstopping.md b/docs/src/nlpstopping.md new file mode 100644 index 00000000..95f352bb --- /dev/null +++ b/docs/src/nlpstopping.md @@ -0,0 +1,18 @@ +## NLPStopping: A Stopping for NLPModels + +The Stopping-structure can be adapted to any problem solved by iterative methods. We discuss here `NLPStopping` a specialization of an `AbstractStopping` for problems of type `NLPModels`. We highlight here the specifities of such instance: +- The problem is an `NLPModel` +- The problem has a funcion-evaluation counter, so we setup a maximum-counters structure in the meta. +- The State is an `NLPAtX` with entries corresponding to usual information for nonlinear optimization models. 
+- The unboundedness check verifies that the objective function is unbounded below for minimization problems, and above for maximization; +- The problem is declared infeasibility if the score is `Inf` for minimization problems, and `-Inf` for maximization. + +```julia +nlp = ADNLPModel(x->sum(x.^2), zeros(5)) +nlp_at_x = NLPAtX(zeros(5)) +meta = StoppingMeta(max_cntrs = _init_max_counters()) +stp = NLPStopping(pb, meta, state) +``` + +By default for `NLPStopping` the optimality function is a function checking the `KKT` conditions using information in the State. +The function `fill_in!` computes all the missing entries in the State. This is an potentially expensive operation, but might be useful. diff --git a/docs/src/speak-to-stopping.md b/docs/src/speak-to-stopping.md new file mode 100644 index 00000000..3a1f5c57 --- /dev/null +++ b/docs/src/speak-to-stopping.md @@ -0,0 +1,38 @@ +## Do you speak Stopping? + +When using a Stopping-compatible algorithm, a.k.a an algorithm that takes a Stopping as an input and update it, +the user is free to explore the results and influence the execution of the algorithm. + +First, we need to create a Stopping. +```julia +x = ones(10) +problem = nothing #or your instance +stp = GenericStopping(pb, x, max_time = 10.) #short-cut initializing a `GenericState` and a `StoppingMeta` +@show stp.meta.max_time == 10. #by default the `kwargs...` are passed to the meta. +``` +One can also creates separately a state and a meta to form a Stopping: +```julia +state = GenericState(x) +meta = StoppingMeta(max_time = 10.) +stp = GenericStopping(pb, meta, state) +``` +Once the `Stopping` has been initialized, we call the algorithm and exploit the output. +```julia +stp = rand_solver(stp, x) #call your favorite solver +``` +To understand why the algorithm stopped we use `status`. +```julia +status(stp) #or `status(stp, rlist = true)` to have the complete list. +``` +The solution as well as problem-related information can be accessed from the state. +```julia +sol = stp.current_state.x +``` + +### FAQ: How do I know the entries in the Stopping, State or the Meta? +You can use Julia's build-in `fieldnames` function. +```julia +fieldnames(stp) +fieldnames(stp.current_state) +fieldnames(stp.meta) +``` diff --git a/docs/src/stop-workflow.md b/docs/src/stop-workflow.md new file mode 100644 index 00000000..0482ca66 --- /dev/null +++ b/docs/src/stop-workflow.md @@ -0,0 +1,29 @@ +## Stopping-work flow + +The table below depict the various checks done by the function `stop!` and their connection with the `meta`, `current_state` and `current_state`. The *function* entry correspond to the function used internally by `stop!`, they can be imported and redifined to be adapted for a specific problem, for instance `NLPStopping` for `NLPModels`. The *remote\_control* entry corresponds to the attribute in the `remote_control` that could be set as true/false to activate/deactivate this check. The *meta\_status* gives the attribute in the `meta` with the check's answer. Finally the last column corresponds to entries in the `meta` parametrizing this check. + +| Check description | Function | remote control | meta statuses | meta tolerances | +| ----------------------------------------- | ---------------------------------:| ---------------:| -----------:| ----:| +| Check unboundedness and the domain of `x` | _unbounded_and_domain_x_check! | unbounded_and_domain_x_check | domainerror and unbounded_problem_x | stp.meta.unbounded_x| +| Check the domain in state (NaN's ...) 
| _domain_check | domain_check | domainerror | | +| Check optimality | _optimality_check! and _null_test | optimality_check | optimal | See *how to check optimality with Stopping* | +| Check for infeasibility | _infeasibility_check! | infeasibility_check | infeasible | | +| Check for unboundedness in problem values | _unbounded_problem_check! | unbounded_problem_check | unbounded_problem | | +| Check time-limit | _tired_check! | tired_check | tired | start_time, max_time | +| Check for limits in resources | _resources_check! | resources_check | resources || +| Check if algo is stalling | _stalled_check! | stalled_check | stalled || +| Count the number of stop! and limits | _iteration_check! | iteration_check | iteration_limit | max_iter | +| Check if the main_stp stops | _main_pb_check! | main_pb_check | main_pb || +| Callback user check | _user_check! | user_check | stopbyuser | user_check_func! | + +### FAQ: Is Stopping initializing `meta.start_time` on its own? + +Yes, it does when you call `start!` as well as `optimality0` if `start!` check the optimality. + +### FAQ: How to set-up the `user_check`? + +Stopping call the `user_check_func!` defined in the `meta`. + +### FAQ: How does Stopping check the optimality? + +See the tutorial on this topic. diff --git a/src/State/ListOfStates.jl b/src/State/ListOfStates.jl index 22f56aba..27507db8 100644 --- a/src/State/ListOfStates.jl +++ b/src/State/ListOfStates.jl @@ -119,9 +119,9 @@ function print(list :: AbstractListStates; for k in fieldnames(typeof(list.list[1,1])) tab = vcat(tab, [getfield(i[1], k) for i in list.list]'); end - df = DataFrame(tab) + df = DataFrame(tab, :auto) - if print_sym == nothing + if isnothing(print_sym) verbose && print(df) else verbose && print(df[!, print_sym]) diff --git a/src/Stopping.jl b/src/Stopping.jl index 754ecf5e..d75b426e 100644 --- a/src/Stopping.jl +++ b/src/Stopping.jl @@ -153,9 +153,9 @@ function show(io :: IO, stp :: VoidStopping) end function show(io :: IO, stp :: AbstractStopping) println(io, typeof(stp)) - print(io, stp.meta) - print(io, stp.stop_remote) - print(io, stp.current_state) + #print(io, stp.meta) #we can always print stp.meta + #print(io, stp.stop_remote) #we can always print stp.stop_remote + #print(io, stp.current_state) #we can always print stp.current_state if !(typeof(stp.main_stp) <: VoidStopping) println(io, "It has a main_stp $(typeof(stp.main_stp))") else @@ -171,17 +171,17 @@ function show(io :: IO, stp :: AbstractStopping) print("Problem is ") show(io, stp.pb) catch - print("Problem is $(typeof(stp.pb)).") + print("Problem is $(typeof(stp.pb)). ") end - if stp.stopping_user_struct != nothing + if !isnothing(stp.stopping_user_struct) try print("The user-defined structure is ") show(io, stp.stopping_user_struct) catch - print("The user-defined structure is of type $(typeof(stp.stopping_user_struct)).") + print("The user-defined structure is of type $(typeof(stp.stopping_user_struct)).\n") end else - print(io, "No user-defined structure is furnished.") + print(io, "No user-defined structure is furnished.\n") end end diff --git a/src/Stopping/GenericStoppingmod.jl b/src/Stopping/GenericStoppingmod.jl index 687ae4a3..3aa164b0 100644 --- a/src/Stopping/GenericStoppingmod.jl +++ b/src/Stopping/GenericStoppingmod.jl @@ -158,7 +158,7 @@ Returns the optimality status of the problem as a boolean. Note: Kwargs are forwarded to the *update!* call. """ function update_and_start!(stp :: AbstractStopping; - no_start_opt_check :: Bool = false, + no_opt_check :: Bool = false, kwargs...) 
if stp.stop_remote.cheap_check @@ -166,7 +166,7 @@ function update_and_start!(stp :: AbstractStopping; else update!(stp.current_state; kwargs...) end - OK = start!(stp, no_start_opt_check = no_start_opt_check) + OK = start!(stp, no_opt_check = no_opt_check) return OK end @@ -174,11 +174,11 @@ end """ Update the Stopping and return *true* if we must stop. - `start!(:: AbstractStopping; no_start_opt_check :: Bool = false, kwargs...)` + `start!(:: AbstractStopping; no_opt_check :: Bool = false, kwargs...)` Purpose is to know if there is a need to even perform an optimization algorithm or if we are at an optimal solution from the beginning. - Set `no_start_opt_check` to *true* avoid checking optimality and domain errors. + Set `no_opt_check` to *true* avoid checking optimality and domain errors. The function `start!` successively calls: `_domain_check(stp, x)`, `_optimality_check!(stp, x)`, `_null_test(stp, x)` and @@ -186,17 +186,16 @@ end Note: - `start!` initializes `stp.meta.start_time` (if not done before), `stp.current_state.current_time` and `stp.meta.optimality0` - (if `no_start_opt_check` is false). + (if `no_opt_check` is false). - Keywords argument are passed to the `_optimality_check!` call. - Compatible with the `StopRemoteControl`. """ function start!(stp :: AbstractStopping; - no_start_opt_check :: Bool = false, + no_opt_check :: Bool = false, kwargs...) state = stp.current_state src = stp.stop_remote - x = state.x #Initialize the time counter if src.tired_check && isnan(stp.meta.start_time) @@ -207,9 +206,10 @@ function start!(stp :: AbstractStopping; _update_time!(state, stp.meta.start_time) end - if !no_start_opt_check + if !no_opt_check stp.meta.domainerror = if src.domain_check - _domain_check(stp.current_state) + #don't check current_score + _domain_check(stp.current_state, current_score = true) else stp.meta.domainerror end @@ -229,7 +229,7 @@ function start!(stp :: AbstractStopping; end end - src.user_start_check && _user_check!(stp, x, true) + src.user_start_check && _user_check!(stp, state.x, true) OK = OK_check(stp.meta) @@ -314,19 +314,22 @@ Note: - Kwargs are sent to the *\\_optimality\\_check!* call. - If listofstates != VoidListStates, call add\\_to\\_list! to update the list of State. """ -function stop!(stp :: AbstractStopping; kwargs...) +function stop!(stp :: AbstractStopping; + no_opt_check :: Bool = false, + kwargs...) - x = stp.current_state.x - src = stp.stop_remote + x = stp.current_state.x + src = stp.stop_remote src.unbounded_and_domain_x_check && _unbounded_and_domain_x_check!(stp, x) stp.meta.domainerror = if src.domain_check - #don't check x - _domain_check(stp.current_state, x = true) + #don't check x and current_score + _domain_check(stp.current_state, x = true, + current_score = true) else stp.meta.domainerror end - if !stp.meta.domainerror + if !no_opt_check && !stp.meta.domainerror # Optimality check if src.optimality_check score = _optimality_check!(stp; kwargs...) @@ -667,8 +670,10 @@ status: returns the status of the algorithm: `status(:: AbstractStopping; list = false)` -The different status are: +The different statuses are: - Optimal: reached an optimal solution. +- SubProblemFailure +- SubOptimal: reached an acceptable solution. - Unbounded: current iterate too large in norm. - UnboundedPb: unbouned problem. - Stalled: stalled algorithm. @@ -682,6 +687,8 @@ The different status are: considered feasible. - StopByUser: stopped by the user. - DomainError: there is a NaN somewhere. 
+- Exception: unhandled exception +- Unknwon: if stopped for reasons unknown by Stopping. Note: - Set keyword argument *list* to true, to get an Array with all the status. @@ -689,27 +696,28 @@ Note: """ function status(stp :: AbstractStopping; list = false) - tt = Dict([(:Optimal, :optimal), - (:SubProblemFailure, :fail_sub_pb), - (:SubOptimal, :suboptimal), - (:Unbounded, :unbounded), - (:UnboundedPb, :unbounded_pb), - (:Stalled, :stalled), - (:IterationLimit, :iteration_limit), - (:TimeLimit, :tired), - (:EvaluationLimit, :resources), - (:ResourcesOfMainProblemExhausted, :main_pb), - (:Infeasible, :infeasible), - (:StopByUser, :stopbyuser), - (:DomainError, :domainerror)]) - - if list - list_status = findall(x -> getfield(stp.meta, x), tt) - if list_status == zeros(0) list_status = [:Unknown] end - else - list_status = findfirst(x -> getfield(stp.meta, x), tt) - if list_status == nothing list_status = :Unknown end - end + tt = Dict([(:Optimal, :optimal), + (:SubProblemFailure, :fail_sub_pb), + (:SubOptimal, :suboptimal), + (:Unbounded, :unbounded), + (:UnboundedPb, :unbounded_pb), + (:Stalled, :stalled), + (:IterationLimit, :iteration_limit), + (:TimeLimit, :tired), + (:EvaluationLimit, :resources), + (:ResourcesOfMainProblemExhausted, :main_pb), + (:Infeasible, :infeasible), + (:StopByUser, :stopbyuser), + (:Exception, :exception), + (:DomainError, :domainerror)]) + + if list + list_status = findall(x -> getfield(stp.meta, x), tt) + if list_status == zeros(0) list_status = [:Unknown] end + else + list_status = findfirst(x -> getfield(stp.meta, x), tt) + if isnothing(list_status) list_status = :Unknown end + end - return list_status + return list_status end diff --git a/src/Stopping/NLPStoppingmod.jl b/src/Stopping/NLPStoppingmod.jl index 95e9f387..898b7465 100644 --- a/src/Stopping/NLPStoppingmod.jl +++ b/src/Stopping/NLPStoppingmod.jl @@ -245,27 +245,27 @@ function fill_in!(stp :: NLPStopping, matrix_info :: Bool = true, kwargs...) - gfx = fx == nothing ? obj(stp.pb, x) : fx - ggx = gx == nothing ? grad(stp.pb, x) : gx + gfx = isnothing(fx) ? obj(stp.pb, x) : fx + ggx = isnothing(gx) ? grad(stp.pb, x) : gx - if Hx == nothing && matrix_info + if isnothing(Hx) && matrix_info gHx = hess(stp.pb, x) else gHx = Hx end if stp.pb.meta.ncon > 0 - gJx = Jx == nothing ? jac(stp.pb, x) : Jx - gcx = cx == nothing ? cons(stp.pb, x) : cx + gJx = isnothing(Jx) ? jac(stp.pb, x) : Jx + gcx = isnothing(cx) ? cons(stp.pb, x) : cx else gJx = stp.current_state.Jx gcx = stp.current_state.cx end #update the Lagrange multiplier if one of the 2 is asked - if (stp.pb.meta.ncon > 0 || has_bounds(stp.pb)) && (lambda == nothing || mu == nothing) + if (stp.pb.meta.ncon > 0 || has_bounds(stp.pb)) && (isnothing(lambda) || isnothing(mu)) lb, lc = _compute_mutliplier(stp.pb, x, ggx, gcx, gJx; kwargs...) - elseif stp.pb.meta.ncon == 0 && !has_bounds(stp.pb) && lambda == nothing + elseif stp.pb.meta.ncon == 0 && !has_bounds(stp.pb) && isnothing(lambda) lb, lc = mu, stp.current_state.lambda else lb, lc = mu, lambda @@ -276,6 +276,43 @@ function fill_in!(stp :: NLPStopping, lambda = lc) end +""" +For NLPStopping, `rcounters` set as true also reinitialize the counters. +""" +function reinit!(stp :: NLPStopping; + rstate :: Bool = false, + rlist :: Bool = true, + rcounters :: Bool = false, + kwargs...) 
+ + stp.meta.start_time = NaN + stp.meta.optimality0 = 1.0 + + #reinitialize the boolean status + reinit!(stp.meta) + + #reinitialize the counter of stop + stp.meta.nb_of_stop = 0 + + #reinitialize the list of states + if rlist && (typeof(stp.listofstates) != VoidListStates) + #TODO: Warning we cannot change the type of ListStates + stp.listofstates = rstate ? VoidListStates() : ListStates(stp.current_state) + end + + #reinitialize the state + if rstate + reinit!(stp.current_state; kwargs...) + end + + #reinitialize the NLPModel Counters + if rcounters && typeof(stp.pb) <: AbstractNLPModel + NLPModels.reset!(stp.pb) + end + + return stp +end + """ \\_resources\\_check!: check if the optimization algorithm has exhausted the resources. This is the NLP specialized version that takes into account diff --git a/src/Stopping/StoppingMetamod.jl b/src/Stopping/StoppingMetamod.jl index cdc9bb1e..ea838c10 100644 --- a/src/Stopping/StoppingMetamod.jl +++ b/src/Stopping/StoppingMetamod.jl @@ -34,7 +34,8 @@ Attributes: - main_pb : status. - domainerror : status. - suboptimal : status. -- stopbyuser : status +- stopbyuser : status. +- exception : status. - meta_user_struct : Any - user_check_func! : Function (AbstractStopping, Bool) -> callback. @@ -62,55 +63,56 @@ mutable struct StoppingMeta{TolType <: Number, IntType <: Int } <: AbstractStoppingMeta - # problem tolerances - atol :: TolType # absolute tolerance - rtol :: TolType # relative tolerance - optimality0 :: TolType # value of the optimality residual at starting point - tol_check :: Function #function of atol, rtol and optimality0 - #by default: tol_check = max(atol, rtol * optimality0) - #other example: atol + rtol * optimality0 - tol_check_neg :: Function # function of atol, rtol and optimality0 - check_pos :: CheckType #pre-allocation for positive tolerance - check_neg :: CheckType #pre-allocation for negative tolerance - optimality_check :: Function # stopping criterion - # Function of (pb, state; kwargs...) - #return type :: Union{Number, eltype(stp.meta)} - retol :: Bool #true if tolerances are updated - - unbounded_threshold :: TolType # beyond this value, the problem is declared unbounded - unbounded_x :: TolType # beyond this value, ||x||_\infty is unbounded - - # fine grain control on ressources - max_f :: IntType # max function evaluations allowed TODO: used? - max_cntrs :: Dict{Symbol,Int64} #contains the detailed max number of evaluations - - # global control on ressources - max_eval :: IntType # max evaluations (f+g+H+Hv) allowed TODO: used? - max_iter :: IntType # max iterations allowed - max_time :: Float64 # max elapsed time allowed - - #intern Counters - nb_of_stop :: IntType - #intern start_time - start_time :: Float64 - - # stopping properties status of the problem) - fail_sub_pb :: Bool - unbounded :: Bool - unbounded_pb :: Bool - tired :: Bool - stalled :: Bool - iteration_limit :: Bool - resources :: Bool - optimal :: Bool - infeasible :: Bool - main_pb :: Bool - domainerror :: Bool - suboptimal :: Bool - stopbyuser :: Bool - - meta_user_struct :: MUS - user_check_func! 
:: Function #called dans Stopping._user_check!(stp, x) + # problem tolerances + atol :: TolType # absolute tolerance + rtol :: TolType # relative tolerance + optimality0 :: TolType # value of the optimality residual at starting point + tol_check :: Function #function of atol, rtol and optimality0 + #by default: tol_check = max(atol, rtol * optimality0) + #other example: atol + rtol * optimality0 + tol_check_neg :: Function # function of atol, rtol and optimality0 + check_pos :: CheckType #pre-allocation for positive tolerance + check_neg :: CheckType #pre-allocation for negative tolerance + optimality_check :: Function # stopping criterion + # Function of (pb, state; kwargs...) + #return type :: Union{Number, eltype(stp.meta)} + retol :: Bool #true if tolerances are updated + + unbounded_threshold :: TolType # beyond this value, the problem is declared unbounded + unbounded_x :: TolType # beyond this value, ||x||_\infty is unbounded + + # fine grain control on ressources + max_f :: IntType # max function evaluations allowed TODO: used? + max_cntrs :: Dict{Symbol,Int64} #contains the detailed max number of evaluations + + # global control on ressources + max_eval :: IntType # max evaluations (f+g+H+Hv) allowed TODO: used? + max_iter :: IntType # max iterations allowed + max_time :: Float64 # max elapsed time allowed + + #intern Counters + nb_of_stop :: IntType + #intern start_time + start_time :: Float64 + + # stopping properties status of the problem) + fail_sub_pb :: Bool + unbounded :: Bool + unbounded_pb :: Bool + tired :: Bool + stalled :: Bool + iteration_limit :: Bool + resources :: Bool + optimal :: Bool + infeasible :: Bool + main_pb :: Bool + domainerror :: Bool + suboptimal :: Bool + stopbyuser :: Bool + exception :: Bool + + meta_user_struct :: MUS + user_check_func! :: Function #called dans Stopping._user_check!(stp, x) end @@ -135,10 +137,10 @@ function StoppingMeta(;atol :: Number = 1.0e-6, check_pos = tol_check(atol, rtol, optimality0) check_neg = tol_check_neg(atol, rtol, optimality0) - # This might be an expansive step. - # if (true in (check_pos .< check_neg)) #any(x -> x, check_pos .< check_neg) - # throw(ErrorException("StoppingMeta: tol_check should be greater than tol_check_neg.")) - # end + # This might be an expansive step. + # if (true in (check_pos .< check_neg)) #any(x -> x, check_pos .< check_neg) + # throw(ErrorException("StoppingMeta: tol_check should be greater than tol_check_neg.")) + # end fail_sub_pb = false unbounded = false @@ -153,6 +155,7 @@ function StoppingMeta(;atol :: Number = 1.0e-6, domainerror = false suboptimal = false stopbyuser = false + exception = false nb_of_stop = 0 @@ -165,7 +168,7 @@ function StoppingMeta(;atol :: Number = 1.0e-6, max_time, nb_of_stop, start_time, fail_sub_pb, unbounded, unbounded_pb, tired, stalled, iteration_limit, resources, optimal, infeasible, main_pb, - domainerror, suboptimal, stopbyuser, + domainerror, suboptimal, stopbyuser, exception, meta_user_struct, user_check_func!) 
end @@ -181,7 +184,8 @@ const meta_statuses = [:fail_sub_pb, :main_pb, :domainerror, :infeasible, - :stopbyuser] + :stopbyuser, + :exception] """ `OK_check(meta :: StoppingMeta)` @@ -295,4 +299,4 @@ function show(io :: IO, meta :: AbstractStoppingMeta) varlines=string(varlines, "There is no user defined structure in the meta.\n") end println(io, varlines) -end \ No newline at end of file +end diff --git a/src/Stopping/nlp_admissible_functions.jl b/src/Stopping/nlp_admissible_functions.jl index be942def..59d3a57e 100644 --- a/src/Stopping/nlp_admissible_functions.jl +++ b/src/Stopping/nlp_admissible_functions.jl @@ -46,7 +46,7 @@ function unconstrained2nd_check(pb :: AbstractNLPModel, end res = max(norm(state.gx, pnorm), - max(- eigmin(state.Hx + state.Hx' - diagm(0 => diag(state.Hx))), 0.0)) + max(- eigmin(Symmetric(state.Hx, :L)), 0.0)) return res end diff --git a/test/examples/newton.jl b/test/examples/newton.jl index 75066bec..7df55928 100644 --- a/test/examples/newton.jl +++ b/test/examples/newton.jl @@ -31,7 +31,7 @@ function newton(stp :: NLPStopping) while !OK #Compute the Newton direction - d = (state.Hx + state.Hx' - diagm(0 => diag(state.Hx))) \ (- state.gx) + d = Symmetric(state.Hx, :L) \ (- state.gx) #Update the iterate xt = xt + d #Update the State and call the Stopping with stop! diff --git a/test/examples/uncons.jl b/test/examples/uncons.jl index 83ae1217..d2d2c616 100644 --- a/test/examples/uncons.jl +++ b/test/examples/uncons.jl @@ -48,7 +48,7 @@ function global_newton(stp :: NLPStopping, #main loop while !OK #Compute the Newton direction - d = (state.Hx + state.Hx' - diagm(0 => diag(state.Hx))) \ (-state.gx) + d = Symmetric(state.Hx, :L) \ (-state.gx) #Prepare the substopping #We reinitialize the stopping before each new use diff --git a/test/runtests.jl b/test/runtests.jl index c4d0f4ff..3892160c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -39,3 +39,6 @@ include("test-stopping/test-unitaire-linearalgebrastopping.jl") printstyled("HowTo tests...\n") include("examples/runhowto.jl") + +#printstyled("Run OptimSolver tests...\n") +#include("examples/run-optimsolver.jl") diff --git a/test/test-state/unit-test-GenericStatemod.jl b/test/test-state/unit-test-GenericStatemod.jl index 5bda6dc7..e27f0cd5 100644 --- a/test/test-state/unit-test-GenericStatemod.jl +++ b/test/test-state/unit-test-GenericStatemod.jl @@ -2,6 +2,8 @@ x0 = ones(6) state0 = GenericState(x0) +show(state0) + @test scoretype(state0) == Float64 @test xtype(state0) == Array{Float64,1} @@ -48,4 +50,5 @@ reinit!(state0, current_time = 0.5) @test !Stopping._check_nan_miss(Counters()) @test !Stopping._check_nan_miss(spzeros(0)) @test !Stopping._check_nan_miss(zeros(0)) -@test !Stopping._check_nan_miss(missing) \ No newline at end of file +@test !Stopping._check_nan_miss(missing) +@test !Stopping._check_nan_miss(spzeros(0,0)) diff --git a/test/test-stopping/test-unitaire-generic-stopping.jl b/test/test-stopping/test-unitaire-generic-stopping.jl index 5f7ee330..5ed0293b 100644 --- a/test/test-stopping/test-unitaire-generic-stopping.jl +++ b/test/test-stopping/test-unitaire-generic-stopping.jl @@ -154,4 +154,4 @@ @test Stopping._inequality_check((0.,1.), (1.,1.), (-1.,-1.)) == true @test_throws ErrorException Stopping._inequality_check(zeros(2), ones(3), -ones(2)) -end \ No newline at end of file +end diff --git a/test/test-stopping/test-unitaire-linearalgebrastopping.jl b/test/test-stopping/test-unitaire-linearalgebrastopping.jl index c7665188..f57668f8 100644 --- 
a/test/test-stopping/test-unitaire-linearalgebrastopping.jl +++ b/test/test-stopping/test-unitaire-linearalgebrastopping.jl @@ -126,16 +126,16 @@ op_stop = LAStopping(LinearSystem(LinearOperator(A), b), opbis_stop = LAStopping(LinearOperator(A), b) try - @time RandomizedBlockKaczmarz(la_stop) + @timed RandomizedBlockKaczmarz(la_stop) @test status(la_stop) == :Optimal - @time RandomizedBlockKaczmarz(sa_stop) + @timed RandomizedBlockKaczmarz(sa_stop) @test status(sa_stop) == :Optimal catch @warn "If LSSModel.A does not exist consider [la_stop.pb.Avals[i,j] for (i) in la_stop.pb.Arows, j in la_stop.pb.Acols]" #https://github.com/JuliaSmoothOptimizers/NLPModels.jl/blob/master/src/lls_model.jl end -@time RandomizedBlockKaczmarz(op_stop) +@timed RandomizedBlockKaczmarz(op_stop) @test status(op_stop) == :Optimal update!(la_stop.current_state, x = xref) @@ -143,4 +143,4 @@ update!(la_stop.current_state, x = xref) update!(op_stop.current_state, x = xref) @test normal_equation_check(op_stop.pb, op_stop.current_state) <= la_stop.meta.atol -end \ No newline at end of file +end diff --git a/test/test-stopping/test-unitaire-nlp-stopping.jl b/test/test-stopping/test-unitaire-nlp-stopping.jl index 1993b1bd..efc8b4ef 100644 --- a/test/test-stopping/test-unitaire-nlp-stopping.jl +++ b/test/test-stopping/test-unitaire-nlp-stopping.jl @@ -81,13 +81,15 @@ stop_bnd.meta.optimality_check = (x,y) -> NaN start!(stop_bnd) @test stop_bnd.meta.domainerror == true - reinit!(stop_bnd) + reinit!(stop_bnd, rcounters = true) + @test neval_grad(stop_bnd.pb) == 0 @test stop_bnd.meta.domainerror == false stop!(stop_bnd) @test stop_bnd.meta.domainerror == true stop_bnd.meta.optimality_check = (x,y) -> 0.0 - reinit!(stop_bnd) + reinit!(stop_bnd, rcounters = true) + @test neval_grad(stop_bnd.pb) == 0 fill_in!(stop_bnd, zeros(5), mu = ones(5), lambda = zeros(0)) @test stop_bnd.current_state.mu == ones(5) @test stop_bnd.current_state.lambda == zeros(0) @@ -111,4 +113,4 @@ @test true end -end \ No newline at end of file +end diff --git a/test/test-stopping/unit-test-remote-control.jl b/test/test-stopping/unit-test-remote-control.jl index 6d598b43..2a083eb3 100644 --- a/test/test-stopping/unit-test-remote-control.jl +++ b/test/test-stopping/unit-test-remote-control.jl @@ -33,5 +33,6 @@ end end - + show(src) + end diff --git a/test/test-stopping/unit-test-stopping-meta.jl b/test/test-stopping/unit-test-stopping-meta.jl index 1f8cc19d..e9d75bf7 100644 --- a/test/test-stopping/unit-test-stopping-meta.jl +++ b/test/test-stopping/unit-test-stopping-meta.jl @@ -29,6 +29,7 @@ @test test_meta.suboptimal == false @test test_meta.main_pb == false @test test_meta.stopbyuser == false + @test test_meta.exception == false @test test_meta.infeasible == false @test test_meta.nb_of_stop == 0 @test test_meta.meta_user_struct == nothing