Skip to content

Commit

Permalink
Merge 7785a8e into 5d7a236
Browse files Browse the repository at this point in the history
  • Loading branch information
ludoro committed Jun 23, 2019
2 parents 5d7a236 + 7785a8e commit 7efc9a5
Show file tree
Hide file tree
Showing 2 changed files with 82 additions and 24 deletions.
79 changes: 57 additions & 22 deletions src/Optimization.jl
@@ -1,14 +1,25 @@
using LinearAlgebra

"""
    merit_function(point, w, surr::AbstractSurrogate, s_max, s_min, d_max, d_min, box_size)

Score candidate `point` for adaptive sampling. The score is a convex
combination, weighted by `w`, of:

- exploitation: the surrogate prediction `surr(point)`, rescaled to [0,1]
  via the known extremes `s_min`/`s_max`, and
- exploration: the distance from `point` to the nearest already-sampled
  point in `surr.x`, rescaled via `d_min`/`d_max` (inverted so that far
  points score low).

Lower scores are better. Works for both scalar points and tuple/vector
points because the distance uses broadcasting and `norm`.
"""
function merit_function(point,w,surr::AbstractSurrogate,s_max,s_min,d_max,d_min,box_size)
    # D_x = distance to the closest previously sampled point. Initialize
    # with an unreachable upper bound: norm(box_size)+1 exceeds any distance
    # inside the box. For scalar box_size, norm is abs — assumes box_size
    # is positive, as a box extent should be (TODO confirm with callers).
    D_x = norm(box_size) + 1
    for i = 1:length(surr.x)
        # `.-` broadcasts, so the same expression handles scalar points
        # (plain subtraction) and tuples/vectors (elementwise difference).
        distance = norm(surr.x[i] .- point)
        if distance < D_x
            D_x = distance
        end
    end
    # w weights exploitation (low surrogate value), (1-w) weights
    # exploration (large distance from existing samples).
    return w*(surr(point) - s_min)/(s_max-s_min) + (1-w)*((d_max - D_x)/(d_max - d_min))
end

"""
Expand All @@ -18,7 +29,7 @@ optimization(lb,ub,surr::AbstractSurrogate,
Finds minimum of objective function while sampling the AbstractSurrogate at
the same time.
"""
function optimization(lb,ub,surr::AbstractSurrogate,maxiters::Int,sample_type::SamplingAlgorithm,num_new_samples::Int)
function optimization(lb,ub,surr::AbstractSurrogate,maxiters::Int,sample_type::SamplingAlgorithm,num_new_samples::Int,obj::Function)
#Suggested by:
#https://www.mathworks.com/help/gads/surrogate-optimization-algorithm.html
scale = 0.2
Expand All @@ -37,8 +48,19 @@ function optimization(lb,ub,surr::AbstractSurrogate,maxiters::Int,sample_type::S
incumbent_value = minimum(surr.y)
incumbent_x = surr.x[argmin(surr.y)]

new_lb = incumbent_x .- scale*norm(incumbent_x-lb)
new_ub = incumbent_x .+ scale*norm(incumbent_x-lb)
new_lb = incumbent_x .- scale*norm(incumbent_x .-lb)
new_ub = incumbent_x .+ scale*norm(incumbent_x .-lb)

@inbounds for i = 1:length(new_lb)
if new_lb[i] < lb[i]
new_lb = collect(new_lb)
new_lb[i] = lb[i]
end
if new_ub[i] > ub[i]
new_ub = collect(new_ub)
new_ub[i] = ub[i]
end
end
new_sample = sample(num_new_samples,new_lb,new_ub,sample_type)

#2) Create merit function
Expand All @@ -49,11 +71,11 @@ function optimization(lb,ub,surr::AbstractSurrogate,maxiters::Int,sample_type::S
s_max = maximum(s)
s_min = minimum(s)

d_min = box_size + 1
d_min = norm(box_size .+ 1)
d_max = 0.0
for r = 1:length(surr.x)
for c = 1:num_new_samples
distance_rc = norm(surr.x[r]-new_sample[c])
distance_rc = norm(surr.x[r] .- new_sample[c])
if distance_rc > d_max
d_max = distance_rc
end
Expand All @@ -62,20 +84,26 @@ function optimization(lb,ub,surr::AbstractSurrogate,maxiters::Int,sample_type::S
end
end
end
#3) Evaluate merit function in the sampled points
evaluation_of_merit_function = merit_function.(new_sample,w,surr,s_max,s_min,d_max,d_min,box_size)

#3)Evaluate merit function in the sampled points

#PROBLEMS WITH VECTORIZED FUNCTION
evaluation_of_merit_function = zeros(float(eltype(surr.x[1])),num_new_samples,1)
@inbounds for r = 1:num_new_samples
evaluation_of_merit_function[r] = merit_function(new_sample[r],w,surr,s_max,s_min,d_max,d_min,box_size)
end

#4) Find minimum of merit function = adaptive point
adaptive_point_x = new_sample[argmin(evaluation_of_merit_function)]

#4) Evaluate objective function at adaptive point
adaptive_point_y = surr(adaptive_point_x)
adaptive_point_y = obj(adaptive_point_x)

#5) Update surrogate with (adaptive_point,objective(adaptive_point)
add_point!(surr,adaptive_point_x,adaptive_point_y)
add_point!(surr,Tuple(adaptive_point_x),adaptive_point_y)

#6) How to go on?
if surr(adaptive_point_x)[1] < incumbent_value
if surr(adaptive_point_x) < incumbent_value
#success
incumbent_x = adaptive_point_x
incumbent_value = adaptive_point_y
Expand Down Expand Up @@ -129,7 +157,8 @@ optimization(lb::Number,ub::Number,surr::AbstractSurrogate,
Finds minimum of objective function while sampling the AbstractSurrogate at
the same time.
"""
function optimization(lb::Number,ub::Number,surr::AbstractSurrogate,maxiters::Int,sample_type::SamplingAlgorithm,num_new_samples::Int)
function optimization(lb::Number,ub::Number,surr::AbstractSurrogate,maxiters::Int,
sample_type::SamplingAlgorithm,num_new_samples::Int,obj::Function)
#Suggested by:
#https://www.mathworks.com/help/gads/surrogate-optimization-algorithm.html
scale = 0.2
Expand All @@ -146,9 +175,15 @@ function optimization(lb::Number,ub::Number,surr::AbstractSurrogate,maxiters::In
incumbent_value = minimum(surr.y)
incumbent_x = surr.x[argmin(surr.y)]

new_sample = sample(num_new_samples,
incumbent_x-scale*norm(incumbent_x-lb)/2,
incumbent_x+scale*norm(incumbent_x-ub)/2,sample_type)
new_lb = incumbent_x-scale*norm(incumbent_x-lb)/2
new_ub = incumbent_x+scale*norm(incumbent_x-ub)/2
if new_lb < lb
new_lb = lb
end
if new_ub > ub
new_ub = ub
end
new_sample = sample(num_new_samples,new_lb,new_ub,sample_type)

#2) Create merit function
s = zeros(eltype(surr.x[1]),num_new_samples)
Expand Down Expand Up @@ -178,13 +213,13 @@ function optimization(lb::Number,ub::Number,surr::AbstractSurrogate,maxiters::In
adaptive_point_x = new_sample[argmin(evaluation_of_merit_function)]

#4) Evaluate objective function at adaptive point
adaptive_point_y = surr(adaptive_point_x)
adaptive_point_y = obj(adaptive_point_x)

#5) Update surrogate with (adaptive_point,objective(adaptive_point)
add_point!(surr,adaptive_point_x,adaptive_point_y)

#6) How to go on?
if surr(adaptive_point_x)[1] < incumbent_value
if surr(adaptive_point_x) < incumbent_value
#success
incumbent_x = adaptive_point_x
incumbent_value = adaptive_point_y
Expand Down
27 changes: 25 additions & 2 deletions test/optimization.jl
@@ -1,5 +1,7 @@
using Surrogates
using LinearAlgebra

##### 1D #####
# Linear test objective for the 1D surrogate-optimization path.
objective_function = x -> 2*x+1
# Initial sample locations and their objective values (y[i] = 2*x[i] + 1).
x = [2.0,4.0,6.0]
y = [5.0,9.0,13.0]
Expand All @@ -13,8 +15,29 @@ b = 6

#Using Kriging
my_k = Kriging(x,y,p)
# The objective is now passed explicitly so the optimizer evaluates the true
# function (not the surrogate) at each adaptive point; the old 6-argument
# call form was removed in this change.
optimization(a,b,my_k,10,UniformSample(),10,objective_function)

#Using RadialBasis
my_rad = RadialBasis(x,y,a,b,z->norm(z),1)
optimization(a,b,my_rad,10,UniformSample(),10,objective_function)



##### ND #####
# N-dimensional objective: affine function of the Euclidean norm of z.
objective_function_ND = z -> 3*norm(z)+1
# Initial samples as coordinate tuples, with objective values computed by
# broadcasting the objective over the sample vector.
x = [(1.2,3.0),(3.0,3.5),(5.2,5.7)]
y = objective_function_ND.(x)
p = [1.2,1.2]       # NOTE(review): presumably per-dimension Kriging smoothness exponents — confirm
theta = [2.0,2.0]   # NOTE(review): presumably Kriging correlation hyperparameters — confirm
lb = [1.0,1.0]
ub = [6.0,6.0]

#Kriging
my_k_ND = Kriging(x,y,p,theta)
optimization(lb,ub,my_k_ND,10,UniformSample(),10,objective_function_ND)

#Radials
# NOTE(review): RadialBasis appears to take per-dimension [lo,hi] bound pairs
# rather than the separate lb/ub vectors used above — confirm against its API.
bounds = [[1.0,6.0],[1.0,6.0]]
my_rad_ND = RadialBasis(x,y,bounds,z->norm(z),1)
optimization(lb,ub,my_rad_ND,10,UniformSample(),10,objective_function_ND)

0 comments on commit 7efc9a5

Please sign in to comment.