Add option to allow for dynamic initial sampling (#76)
frthjf committed Mar 14, 2024
1 parent 56e0643 commit 5aa402b
Showing 3 changed files with 88 additions and 8 deletions.
67 changes: 60 additions & 7 deletions dmosopt/dmosopt.py
@@ -513,6 +513,8 @@ def __init__(
        n_initial=10,
        initial_maxiter=5,
        initial_method="slh",
        dynamic_initial_sampling=None,
        dynamic_initial_sampling_kwargs=None,
        verbose=False,
        reduce_fun=None,
        reduce_fun_args=None,
@@ -595,6 +597,8 @@ def __init__(
        self.num_generations = num_generations
        self.resample_fraction = resample_fraction
        self.distance_metric = distance_metric
        self.dynamic_initial_sampling = dynamic_initial_sampling
        self.dynamic_initial_sampling_kwargs = dynamic_initial_sampling_kwargs
        self.surrogate_method_name = surrogate_method_name
        self.surrogate_method_kwargs = surrogate_method_kwargs
        self.surrogate_custom_training = surrogate_custom_training
@@ -1203,12 +1207,12 @@ def _process_requests(self):
                        self.eval_reqs[problem_id][task_id] = eval_req_dict[problem_id]

                if (
                    self.save
                    and (self.eval_count > 0)
                    and (self.saved_eval_count < self.eval_count)
                ):
                    self.save_evals()
                    self.saved_eval_count = self.eval_count

        assert len(task_ids) == 0
        return self.eval_count, self.saved_eval_count
@@ -1228,7 +1232,56 @@ def run_epoch(self):
        eval_count, saved_eval_count = self._process_requests()

        for problem_id in self.problem_ids:
            distopt = self.optimizer_dict[problem_id]

            # dynamic sampling
            if self.dynamic_initial_sampling is not None:
                dynamic_initial_sampler = import_object_by_path(
                    self.dynamic_initial_sampling
                )

                dyn_sample_iter_count = 0
                while True:
                    more_samples = dynamic_initial_sampler(
                        iteration=dyn_sample_iter_count,
                        evaluated_samples=distopt.completed,
                        next_samples=opt.xinit(
                            self.n_initial,
                            distopt.prob.param_names,
                            distopt.prob.lb,
                            distopt.prob.ub,
                            nPrevious=None,
                            maxiter=self.initial_maxiter,
                            method=self.initial_method,
                            local_random=self.local_random,
                            logger=self.logger,
                        ),
                        sampler={
                            'n_initial': self.n_initial,
                            'maxiter': self.initial_maxiter,
                            'method': self.initial_method,
                            "param_names": distopt.prob.param_names,
                            "xlb": distopt.prob.lb,
                            "xub": distopt.prob.ub,
                        },
                        **(self.dynamic_initial_sampling_kwargs or {}),
                    )

                    if more_samples is None:
                        break

                    distopt.reqs.extend(
                        [
                            EvalRequest(more_samples[i, :], None, 0)
                            for i in range(more_samples.shape[0])
                        ]
                    )

                    self._process_requests()

                    dyn_sample_iter_count += 1

            distopt.initialize_epoch(epoch)

        while not completed_epoch:
            eval_count, saved_eval_count = self._process_requests()
2 changes: 2 additions & 0 deletions docs/guide/configuration.md
@@ -101,6 +101,8 @@ The effectiveness of the optimization will greatly depend on the [sampling strat

dmosopt supports evaluating different problems with the same set of parameters. Use `problem_ids` to specify a set of problem IDs (otherwise, it defaults to `set([0])`). The objective function must return a dictionary of the form `{ problem_id: ... }` for each ID.
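
For instance, a minimal sketch of such an objective (the parameter name, the objective values, and the two problem IDs here are illustrative):

```python
import numpy as np

# Hypothetical sketch: one parameter set evaluated under two problem IDs;
# each ID maps to its own vector of objective values.
def obj_fun(params):
    x = params["x"]
    return {
        0: np.asarray([x**2, (x - 1.0) ** 2]),
        1: np.asarray([(x + 1.0) ** 2, x**2]),
    }
```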

Furthermore, it is possible to implement dynamic sampling strategies via the `dynamic_initial_sampling` option (with optional keyword arguments passed via `dynamic_initial_sampling_kwargs`). [Learn more](./sampling)
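
A configuration sketch (the import paths and the kwargs are placeholders; the remaining keys follow the usual dmosopt parameter dictionary):

```python
dmosopt_params = {
    "opt_id": "dmosopt_example",
    "obj_fun_name": "my_module.obj_fun",  # placeholder import path
    "space": {"x": [-2.0, 2.0]},
    "objective_names": ["f1", "f2"],
    "n_initial": 10,
    # placeholder import path to a callable like the one described in the sampling guide
    "dynamic_initial_sampling": "my_module.dynamic_sampling",
    "dynamic_initial_sampling_kwargs": {"max_batches": 5},  # illustrative
}
```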

### Surrogate strategy

[Surrogate models](./surrogates) can greatly improve sampling effectiveness and convergence. Use `surrogate_method_name` to point to a strategy; method-specific options can be passed via `surrogate_method_kwargs`. Moreover, to use a custom training method, you can pass its Python import path to `surrogate_custom_training` (and additional arguments via `surrogate_custom_training_kwargs`).
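
For example, a sketch assuming a Gaussian-process strategy is available under the name `gpr` (the kwargs shown are illustrative, not an exhaustive list):

```python
dmosopt_params.update(
    {
        # assumed built-in strategy name; kwargs are illustrative
        "surrogate_method_name": "gpr",
        "surrogate_method_kwargs": {"anisotropic": False, "optimizer": "sceua"},
    }
)
```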
27 changes: 26 additions & 1 deletion docs/guide/sampling.md
@@ -10,4 +10,29 @@ dmosopt implements various sampling strategies listed below:
</li>
</ul>

You may also point to your custom implementations by specifying a Python import path.

## Dynamic sampling

By default, the number of initial samples is fixed via the `n_initial` parameter. However, dmosopt also supports dynamic sampling strategies that keep generating samples until custom criteria are met. To implement one, point `dynamic_initial_sampling` to a callable (optional keyword arguments can be passed via `dynamic_initial_sampling_kwargs`). The callable receives the iteration count, the samples evaluated so far, an unevaluated next batch of samples, and the sampler options. It must return a new set of samples, or `None` to end the dynamic sampling process. For example:

```python
import numpy as np

from dmosopt.datatypes import EvalEntry

def dynamic_sampling(
    iteration: int,
    evaluated_samples: list[EvalEntry],
    next_samples: np.ndarray,
    sampler: dict,  # contains n_initial, maxiter, method, param_names, xlb, xub
    **kwargs,
):
    done = ...  # decide if sampling is complete based on `evaluated_samples`

    if done:
        # no more samples; sampling completes using `evaluated_samples`
        return None

    # return the next set of samples
    # (can be the unmodified `next_samples` or some custom set of samples)
    return next_samples
```
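
As a concrete illustration, here is a minimal sketch of a stopping criterion that ends sampling once the evaluated set reaches a fixed multiple of the initial batch size (the function name and the factor of five are arbitrary):

```python
def stop_after_fixed_budget(iteration, evaluated_samples, next_samples, sampler, **kwargs):
    # stop once five times the initial batch size has been evaluated
    if len(evaluated_samples) >= 5 * sampler["n_initial"]:
        return None
    # otherwise continue with the proposed batch
    return next_samples
```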
