Add and fix rastrigin example
d53dave committed Nov 22, 2019
1 parent 5c38ce0 commit fa23fa8
Showing 4 changed files with 214 additions and 36 deletions.
34 changes: 21 additions & 13 deletions examples/rastrigin/rastrigin.conf
@@ -1,44 +1,52 @@
{
# This name will be used to TODO
name = rastrigin_12000
name = rastrigin_2d

save_to_file {
type = best # or all, none
type = all # or best, none
# base_dir = /home/username/optimization_results/ # This is optional, will use cwd by default
}

model {
skip_typecheck = true
dimensions = 3
}

optimization {
thread_count = 8
initial_temp = 5.0,
# random_seed = -919,
max_steps = 5000
thread_count = 256
initial_temp = 1000.0,
max_steps = 3200
min_temp = 1e-30
}

debug {
gpu_simulator: enabled
}
# debug {
# gpu_simulator: enabled
# }

remote {
local_docker = True
# local_docker = True
# platform = aws
# use_existing_instances = false
# terminate_on_exit = true
terminate_on_exit = false
platform = aws
aws {
region = eu-central-1
# # These will be picked up from ~/.aws/credentials or ENV
# secret_key = 123
# access_key = 123
worker_instance_type = g2.2xlarge
broker_instance_type = m5.2xlarge
# worker_instance_type = g2.2xlarge
# broker_instance_type = m5.2xlarge
worker_count = 1
timeout_provision = 2000000
timeout_startup = 1000000
timeout_deploy = 1000000
timeout_optimization = -1
instances { # note that these need to be IDs, not names
broker_password = "kuV52Y9RI2s9G6ALmpgYa32Cbk514i2o"
security_group = "sg-093c978f34b9cffc0"
broker = "i-0d9173d6ef56c7314"
workers = ["i-0eb3d791f7945561c"]
}
}
}
}
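
The .conf files above use HOCON-style syntax (nested blocks, "=" assignments, "#" comments). Assuming they parse as standard HOCON (the parser itself is not part of this commit), a minimal sketch of reading the optimization block with the pyhocon library could look like this:

from pyhocon import ConfigFactory  # assumed dependency, not shown in this commit

conf = ConfigFactory.parse_string("""
optimization {
    thread_count = 256
    initial_temp = 1000.0
    max_steps = 3200
    min_temp = 1e-30
}
""")

# Nested keys are addressed with dotted paths
print(conf.get_int('optimization.thread_count'))    # 256
print(conf.get_float('optimization.initial_temp'))  # 1000.0
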
52 changes: 52 additions & 0 deletions examples/rastrigin/rastrigin.docker.conf
@@ -0,0 +1,52 @@
{
# This name will be used to TODO
name = rastrigin_2d_docker

save_to_file {
type = all # or best, none
# base_dir = /home/username/optimization_results/ # This is optional, will use cwd by default
}

model {
skip_typecheck = true
dimensions = 3
}

optimization {
thread_count = 256
initial_temp = 1000.0,
max_steps = 3200
min_temp = 1e-30
}

debug {
gpu_simulator: enabled
}

remote {
local_docker = True
# platform = aws
# use_existing_instances = false
# terminate_on_exit = false
# platform = aws
# aws {
# region = eu-central-1
# # These will be picked up from ~/.aws/credentials or ENV
# secret_key = 123
# access_key = 123
# worker_instance_type = g2.2xlarge
# broker_instance_type = m5.2xlarge
# worker_count = 1
# timeout_provision = 2000000
# timeout_startup = 1000000
# timeout_deploy = 1000000
# timeout_optimization = -1
# instances { # note that these need to be IDs, not names
# broker_password = "kuV52Y9RI2s9G6ALmpgYa32Cbk514i2o"
# security_group = "sg-093c978f34b9cffc0"
# broker = "i-0d9173d6ef56c7314"
# workers = ["i-0eb3d791f7945561c"]
# }
# }
}
}
104 changes: 104 additions & 0 deletions examples/rastrigin/rastrigin_docker_opt.py
@@ -0,0 +1,104 @@
# Rastrigin Function
# https://www.sfu.ca/~ssurjano/rastr.html
#
# Dimensions: d
#
# The Rastrigin function has several local minima. It is highly multimodal, but locations of the minima are regularly
# distributed.
#
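# The function is defined as
#   f(x) = 10*d + sum_{i=1..d} (x_i^2 - 10*cos(2*pi*x_i))
#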
# Input Domain:
# The function is usually evaluated on the hypercube x_i in [-5.12, 5.12], for all i = 1, ..., d.
#
# Global Minimum:
# f(x*) = 0, at x* = (0, ..., 0)
#
# References:
# Global Optimization Test Problems. Retrieved June 2013, from
# http://www-optima.amp.i.kyoto-u.ac.jp/member/student/hedar/Hedar_files/TestGO.htm.

# Pohlheim, H. GEATbx Examples: Examples of Objective Functions (2005). Retrieved June 2013, from http://www.geatbx.com/download/GEATbx_ObjFunExpl_v37.pdf.

import math

from csaopt.model import RandomDistribution, Precision
from csaopt.utils import clamp
from csaopt.utils import FakeCuda as cuda
from typing import MutableSequence, Sequence, Any, Tuple
from math import pi

# Configuration

# -- Globals

max_steps = 320


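# Linearly rescale val from the range [old_min, old_max] to [new_min, new_max].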
@cuda.jit(device=True)
def scale(val, old_min, old_max, new_min, new_max):
return (val - old_min) / (old_max - old_min) * (new_max - new_min) + new_min


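# Device-side helper that copies the contents of state b into state a, element by element.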
@cuda.jit(device=True, inline=True)
def copy_state(b, a):
for i in range(len(b)):
a[i] = b[i]


# -- Globals


def distribution() -> RandomDistribution:
return RandomDistribution.Uniform


def precision() -> Precision:
return Precision.Float64


def dimensions() -> int:
return 2


def empty_state() -> Tuple:
return (0.0, 0.0)


# Functions


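# Geometric cooling schedule: every step multiplies the previous temperature by a constant factor (here 0.7).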
def cool(initial_temp: float, old_temp: float, step: int) -> float:
return (1 - 0.3) * old_temp


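# Metropolis acceptance criterion: accept the candidate state whenever exp(-(e_new - e_old) / temp) >= rnd.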
def acceptance_func(e_old: float, e_new: float, temp: float, rnd: float) -> float:
# Clamp the exponent so that math.exp can neither underflow nor overflow; the acceptance test only compares e^x against rnd.
x = clamp(-80, -(e_new - e_old) / temp, 0.1)
return math.exp(x) >= rnd


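# Map the uniform randoms from [0, 1] onto the Rastrigin domain [-5.12, 5.12] to build the initial state.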
def initialize(state: MutableSequence, randoms: Sequence[float]) -> None:
for i in range(len(state)):
state[i] = scale(randoms[i], 0.0, 1.0, -5.12, 5.12)
return


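# Rastrigin objective: 10*d plus the sum over all dimensions of x_i^2 - 10*cos(2*pi*x_i).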
def evaluate(state: Sequence) -> float:
d = len(state)
t1 = 0.0
for i in range(d):
x_i = state[i]
t1 += x_i * x_i - 10 * math.cos(2 * pi * x_i)
return 10 * d + t1


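# Propose a neighbouring state: perturb every dimension by a random step whose magnitude shrinks as the
# annealing progresses, and step in the opposite direction if the move would leave [-5.12, 5.12].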
def generate_next(state: Sequence, new_state: MutableSequence, randoms: Sequence[float], step: int) -> Any:
d = len(state)
for dim in range(d):
delta = (randoms[dim] - 0.5) * 7 * (1 - float(step) / max_steps * 1.1)
new_val = state[dim] + delta
# print('New val', new_val, 'at scale 1 -', step, '/', max_steps, '=',
# 1 - (float(step) / max_steps))
if new_val > 5.12 or new_val < -5.12:
new_val = state[dim] - delta
new_state[dim] = new_val
return
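
To make the optimization landscape concrete, the objective can be checked by hand outside of the annealing harness. The helper name rastrigin below is illustrative only and not part of this commit:

import math


def rastrigin(xs):
    # Same form as evaluate() above: f(x) = 10*d + sum(x_i^2 - 10*cos(2*pi*x_i))
    d = len(xs)
    return 10 * d + sum(x * x - 10 * math.cos(2 * math.pi * x) for x in xs)


print(rastrigin([0.0, 0.0]))  # 0.0, the global minimum
print(rastrigin([1.0, 1.0]))  # 2.0, close to one of the many local minima
print(rastrigin([4.5, 4.5]))  # 80.5, near the domain boundary where the function is large
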
60 changes: 37 additions & 23 deletions examples/rastrigin/rastrigin_opt.py
@@ -22,11 +22,30 @@

from csaopt.model import RandomDistribution, Precision
from csaopt.utils import clamp
from csaopt.utils import FakeCuda as cuda
from typing import MutableSequence, Sequence, Any, Tuple
from math import pi

# Configuration

# -- Globals

max_steps = 3200


@cuda.jit(device=True)
def scale(val, old_min, old_max, new_min, new_max):
return (val - old_min) / (old_max - old_min) * (new_max - new_min) + new_min


@cuda.jit(device=True, inline=True)
def copy_state(b, a):
for i in range(len(b)):
a[i] = b[i]


# -- Globals


def distribution() -> RandomDistribution:
return RandomDistribution.Uniform
@@ -37,54 +56,49 @@ def precision() -> Precision:


def dimensions() -> int:
return 10
return 2


def empty_state() -> Tuple:
return (
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
)
return (0.0, 0.0)


# Functions


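# Geometric cooling schedule: the temperature is reduced by 3% of its current value at every step.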
def cool(initial_temp: float, old_temp: float, step: int) -> float:
return initial_temp * math.pow(0.97, step)
return (1 - 0.03) * old_temp


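# Standard Metropolis test: the candidate state is accepted when exp(-(e_new - e_old) / temp) >= rnd.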
def acceptance_func(e_old: float, e_new: float, temp: float, rnd: float) -> float:
# Clamp the exponent so that math.exp can neither underflow nor overflow; the acceptance test only compares e^x against rnd.
x = clamp(-80, (e_old - e_new) / temp, 0.1)
return math.exp(x) > rnd
x = clamp(-80, -(e_new - e_old) / temp, 0.1)
return math.exp(x) >= rnd


def initialize(state: MutableSequence, randoms: Sequence[float]) -> None:
generate_next(state, state, randoms, 0) # just delegate to generate_next
for i in range(len(state)):
state[i] = scale(randoms[i], 0.0, 1.0, -5.12, 5.12)
return


def evaluate(state: Sequence) -> float:
d = len(state)

t1 = 0.0
for i in range(d):
x_i = state[i]
t1 += x_i * x_i - 10 * math.cos(2 * pi * x_i)

return 10 * d + t1


def generate_next(state: Sequence, new_state: MutableSequence, randoms: Sequence[float], step) -> Any:
for i in range(len(state)):
new_state[i] = clamp(-5.12, 10.24 * (randoms[i] - 0.5), 5.12)
return
def generate_next(state: Sequence, new_state: MutableSequence, randoms: Sequence[float], step: int) -> Any:
d = len(state)
for dim in range(d):
delta = (randoms[dim] - 0.5) * 7 * (1 - float(step) / max_steps * 1.1)
new_val = state[dim] + delta
# print('New val', new_val, 'at scale 1 -', step, '/', max_steps, '=',
# 1 - (float(step) / max_steps))
if new_val > 5.12 or new_val < -5.12:
new_val = state[dim] - delta
new_state[dim] = new_val
return
