Skip to content

Commit

Permalink
Now samplers return a tuple which is out-of-the-box compatible with surrogates and optimization methods
Browse files Browse the repository at this point in the history
  • Loading branch information
ludoro committed Aug 5, 2019
1 parent 20b38e7 commit e15befb
Show file tree
Hide file tree
Showing 7 changed files with 15 additions and 25 deletions.
10 changes: 5 additions & 5 deletions src/Sampling.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ function sample(n,lb,ub,S::GridSample)
else
d = length(lb)
x = [[rand(lb[j]:dx[j]:ub[j]) for j = 1:d] for i in 1:n]
return x
return Tuple.(x)
end
end

Expand All @@ -30,7 +30,7 @@ function sample(n,lb,ub,::UniformSample)
else
d = length(lb)
x = [[rand(Uniform(lb[j],ub[j])) for j in 1:d] for i in 1:n]
return x
return Tuple.(x)
end
end

Expand All @@ -45,7 +45,7 @@ function sample(n,lb,ub,::SobolSample)
if lb isa Number
return [next!(s)[1] for i = 1:n]
else
return [next!(s) for i = 1:n]
return Tuple.([next!(s) for i = 1:n])
end
end

Expand All @@ -66,7 +66,7 @@ function sample(n,lb,ub,::LatinHypercubeSample)
@inbounds for c = 1:d
x[:,c] = (ub[c]-lb[c])*x[:,c]/n .+ lb[c]
end
return x
return Tuple.(x)
end
end

Expand Down Expand Up @@ -115,6 +115,6 @@ function sample(n,lb,ub,S::LowDiscrepancySample)
@inbounds for c = 1:d
x[:,c] = (ub[c]-lb[c])*x[:,c] .+ lb[c]
end
return x
return Tuple.(x)
end
end
3 changes: 1 addition & 2 deletions test/SVMSurrogate.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,7 @@ add_point!(my_svm_1D,[3.2,3.5],[7.4,8.0])
obj_N = x -> x[1]^2*x[2]
lb = [0.0,0.0]
ub = [10.0,10.0]
s = sample(100,lb,ub,UniformSample())
x = Tuple.(s)
x = sample(100,lb,ub,UniformSample())
y = obj_N.(x)
my_svm_ND = SVMSurrogate(x,y,lb,ub)
val = my_svm_ND((5.0,1.2))
Expand Down
3 changes: 1 addition & 2 deletions test/linearSurrogate.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,7 @@ add_point!(my_linear_surr_1D,[5.0,6.0],[8.3,9.7])
#ND
lb = [0.0,0.0]
ub = [10.0,10.0]
s = sample(5,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(5,lb,ub,SobolSample())
y = [4.0,5.0,6.0,7.0,8.0]
my_linear_ND = LinearSurrogate(x,y,lb,ub)
add_point!(my_linear_ND,(10.0,11.0),9.0)
Expand Down
3 changes: 1 addition & 2 deletions test/lobachesky.jl
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,7 @@ lb = [0.0,0.0]
ub = [8.0,8.0]
alpha = 2.4
n = 8
s = sample(3200,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(3200,lb,ub,SobolSample())
y = obj.(x)
my_loba_ND = LobacheskySurrogate(x,y,alpha,n,lb,ub)

Expand Down
3 changes: 1 addition & 2 deletions test/neuralSurrogate.jl
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,7 @@ println(val)

lb = [0.0,0.0]
ub = [5.0,5.0]
s = sample(5,lb,ub, SobolSample())
x = Tuple.(s)
x = sample(5,lb,ub, SobolSample())
obj_ND_neural(x) = x[1]*x[2];
y = obj_ND_neural.(x)
model = Chain(Dense(2,1))
Expand Down
15 changes: 5 additions & 10 deletions test/optimization.jl
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,7 @@ my_rad_SRBFN = RadialBasis(x,y,bounds,z->norm(z),1)
surrogate_optimize(objective_function_ND,SRBF(),lb,ub,my_rad_SRBFN,UniformSample())

# Lobachesky
s = sample(5,lb,ub,UniformSample())
x = Tuple.(s)
x = sample(5,lb,ub,UniformSample())
y = objective_function_ND.(x)
alpha = 2.0
n = 4
Expand All @@ -58,8 +57,7 @@ surrogate_optimize(objective_function_ND,SRBF(),lb,ub,my_loba_ND,UniformSample()
#Linear
lb = [1.0,1.0]
ub = [6.0,6.0]
s = sample(500,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(500,lb,ub,SobolSample())
objective_function_ND = z -> 3*norm(z)+1
y = objective_function_ND.(x)
my_linear_ND = LinearSurrogate(x,y,lb,ub)
Expand All @@ -68,8 +66,7 @@ surrogate_optimize(objective_function_ND,SRBF(),lb,ub,my_linear_ND,SobolSample()
#SVM
lb = [1.0,1.0]
ub = [6.0,6.0]
s = sample(5,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(5,lb,ub,SobolSample())
objective_function_ND = z -> 3*norm(z)+1
y = objective_function_ND.(x)
my_SVM_ND = SVMSurrogate(x,y,lb,ub)
Expand All @@ -78,8 +75,7 @@ surrogate_optimize(objective_function_ND,SRBF(),lb,ub,my_SVM_ND,SobolSample(),ma
#Neural
lb = [1.0,1.0]
ub = [6.0,6.0]
s = sample(5,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(5,lb,ub,SobolSample())
objective_function_ND = z -> 3*norm(z)+1
y = objective_function_ND.(x)
model = Chain(Dense(2,1))
Expand All @@ -92,8 +88,7 @@ surrogate_optimize(objective_function_ND,SRBF(),lb,ub,my_neural_ND_neural,SobolS
#Random Forest
lb = [1.0,1.0]
ub = [6.0,6.0]
s = sample(5,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(5,lb,ub,SobolSample())
objective_function_ND = z -> 3*norm(z)+1
y = objective_function_ND.(x)
num_round = 2
Expand Down
3 changes: 1 addition & 2 deletions test/random_forest.jl
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,7 @@ add_point!(my_forest_1D,[7.0,8.0],obj_1D.([7.0,8.0]))
#ND
lb = [0.0,0.0]
ub = [10.0,10.0]
s = sample(5,lb,ub,SobolSample())
x = Tuple.(s)
x = sample(5,lb,ub,SobolSample())
obj_ND = x -> x[1] * x[2]^2
y = obj_ND.(x)
my_forest_ND = RandomForestSurrogate(x,y,lb,ub,num_round)
Expand Down

0 comments on commit e15befb

Please sign in to comment.