Skip to content

Commit

Permalink
Merge pull request #1 from Goysa2/dev-sam
Browse files Browse the repository at this point in the history
stopping adjusted to accept kwargs
  • Loading branch information
vepiteski committed Jun 5, 2017
2 parents 082595f + a740599 commit d600fbe
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 34 deletions.
35 changes: 18 additions & 17 deletions src/stopping.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,8 @@
export TStopping, start!, stop

type TStopping
nlp :: AbstractNLPModel # the model
atol :: Float64 # absolute tolerance
rtol :: Float64 # relative tolerance
rtol :: Float64 # relative tolerance
unbounded_threshold :: Float64 # below this value, the problem is declared unbounded
# fine grain control on ressources
max_obj_f :: Int # max objective function (f) evaluations allowed
Expand All @@ -21,51 +20,53 @@ type TStopping
optimality_residual :: Function # function to compute the optimality residual


function TStopping(nlp :: AbstractNLPModel;
atol :: Float64 = 1.0e-8,
function TStopping(;atol :: Float64 = 1.0e-8,
rtol :: Float64 = 1.0e-6,
unbounded_threshold :: Float64 = -1.0e50,
max_obj_f :: Int = typemax(Int),
max_obj_grad :: Int = typemax(Int),
max_obj_f :: Int = typemax(Int),
max_obj_grad :: Int = typemax(Int),
max_obj_hess :: Int = typemax(Int),
max_obj_hv :: Int = typemax(Int),
max_eval :: Int = 20000,
max_iter :: Int = 5000,
max_time :: Float64 = 600.0, # 10 minutes
optimality_residual :: Function = x -> norm(x,Inf)
)
optimality_residual :: Function = x -> norm(x,Inf),
kwargs...)

return new(nlp, atol, rtol, unbounded_threshold,
max_obj_f, max_obj_grad, max_obj_hess, max_obj_hv, max_eval,
return new(atol, rtol, unbounded_threshold,
max_obj_f, max_obj_grad, max_obj_hess, max_obj_hv, max_eval,
max_iter, max_time, NaN, Inf, optimality_residual)
end
end




"""
    start!(nlp, s, x₀)

Initialize the stopping-criterion state `s` for a solve of `nlp` beginning
at the point `x₀`.

Records:
- `s.optimality0`: the optimality residual at `x₀` (computed from the
  gradient of `nlp` at `x₀`), later used as the reference value in the
  relative-tolerance test of `stop`;
- `s.start_time`: the wall-clock start time, used for the `max_time` check.

Returns the updated `s`.

NOTE(review): this span was diff residue interleaving the old one-argument
signature `start!(s, x₀)` with the post-commit `start!(nlp, s, x₀)`; the
merged post-commit version is reconstructed here.
"""
function start!(nlp :: AbstractNLPModel,
                s :: TStopping,
                x₀ :: Array{Float64,1})
    # Reference residual for the relative stopping test in `stop`.
    s.optimality0 = s.optimality_residual(grad(nlp, x₀))
    # Wall-clock origin for the elapsed-time / max_time check.
    s.start_time = time()
    return s
end


function stop(s :: TStopping,
function stop(nlp :: AbstractNLPModel,
s :: TStopping,
iter :: Int,
x :: Array{Float64,1},
f :: Float64,
∇f :: Array{Float64,1},
)

counts = s.nlp.counters
counts = nlp.counters
calls = [counts.neval_obj, counts.neval_grad, counts.neval_hess, counts.neval_hprod]

optimality = s.optimality_residual(∇f)

optimal = (optimality < s.atol) | (optimality <( s.rtol * s.optimality0))
#optimal = (optimality < s.atol) | (optimality <( s.rtol * s.optimality0))
optimal = optimality < s.atol +s.rtol*s.optimality0
unbounded = f <= s.unbounded_threshold


Expand All @@ -91,6 +92,6 @@ function stop(s :: TStopping,

# return everything. Most users will use only the first four fields, but return
# the fine grained information nevertheless.
return optimal, unbounded, tired, elapsed_time,
return optimal, unbounded, tired, elapsed_time,
max_obj_f, max_obj_g, max_obj_H, max_obj_Hv, max_total, max_iter, max_time
end
34 changes: 17 additions & 17 deletions test/steepestS.jl
Original file line number Diff line number Diff line change
@@ -1,29 +1,29 @@
export steepest
export steepestS

function steepest(nlp :: AbstractNLPModel;
s :: TStopping = Stopping(nlp),
function steepestS(nlp :: AbstractNLPModel;
s :: TStopping = TStopping(),
verbose :: Bool=true,
linesearch :: Function = Newarmijo_wolfe,
kwargs...)

x = copy(nlp.meta.x0)
n = nlp.meta.nvar

xt = Array(Float64, n)
∇ft = Array(Float64, n)

f = obj(nlp, x)
∇f = grad(nlp, x)

iter = 0

s = start!(s,x)
s = start!(nlp,s,x)

verbose && @printf("%4s %8s %7s %8s %4s\n", "iter", "f", "‖∇f‖", "∇f'd", "bk")
verbose && @printf("%4d %8.1e %7.1e", iter, f, norm(∇f))

optimal, unbounded, tired, elapsed_time = stop(s,iter,x,f,∇f)
optimal, unbounded, tired, elapsed_time = stop(nlp,s,iter,x,f,∇f)

OK = true
stalled_linesearch = false
stalled_ascent_dir = false
Expand All @@ -36,30 +36,30 @@ function steepest(nlp :: AbstractNLPModel;
#println("Not a descent direction! slope = ", slope)
else
verbose && @printf(" %8.1e", slope)

# Perform improved Armijo linesearch.
h = C1LineFunction(nlp, x, d)
t, good_grad, ft, nbk, nbW, stalled_linesearch = linesearch(h, f, slope, ∇ft, verbose=false; kwargs...)
#!stalled_linesearch || println("Max number of Armijo backtracking ",nbk)
verbose && @printf(" %4d\n", nbk)

xt = x + t*d
good_grad || (∇ft = grad!(nlp, xt, ∇ft))

# Move on.
x = xt
f = ft
∇f = ∇ft
iter = iter + 1

verbose && @printf("%4d %8.1e %7.1e", iter, f, norm(∇f))
optimal, unbounded, tired, elapsed_time = stop(s,iter,x,f,∇f)

optimal, unbounded, tired, elapsed_time = stop(nlp,s,iter,x,f,∇f)
end
OK = !stalled_linesearch & !stalled_ascent_dir
end
verbose && @printf("\n")


if optimal status = :Optimal
elseif unbounded status = :Unbounded
Expand Down

0 comments on commit d600fbe

Please sign in to comment.