Merge pull request #942 from Delaunay/no_singleton
no singleton test
bouthilx committed Jul 29, 2022
2 parents b27d3bc + 2b9d745 commit 73cc6cd
Showing 88 changed files with 2,579 additions and 2,183 deletions.
1 change: 1 addition & 0 deletions .github/workflows/build.yml
@@ -27,6 +27,7 @@ jobs:
strategy:
matrix:
toxenv: [pylint, doc8, docs]

steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
4 changes: 2 additions & 2 deletions dashboard/src/src/__tests__/flattenObject.test.js
@@ -4,7 +4,7 @@ test('test flatten object', () => {
const input = {
a: 1,
b: {
ba: 'world',
be: 'world',
bb: 1.5,
bc: {
bd: {
@@ -30,7 +30,7 @@ test('test flatten object', () => {
const keys = Object.keys(output);
expect(keys.length).toBe(12);
expect(output.hasOwnProperty('a')).toBeTruthy();
expect(output.hasOwnProperty('b.ba')).toBeTruthy();
expect(output.hasOwnProperty('b.be')).toBeTruthy();
expect(output.hasOwnProperty('b.bb')).toBeTruthy();
expect(output.hasOwnProperty('b.bc.bd.a key')).toBeTruthy();
expect(output.hasOwnProperty('b.bc.bd.another key.x')).toBeTruthy();
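As a quick illustration of the flattening behaviour this test exercises (nested keys joined with dots, e.g. 'b.be' and 'b.bc.bd.a key'), here is a minimal Python sketch; the helper below is hypothetical and is not the dashboard's actual flattenObject implementation.

def flatten(obj, prefix=""):
    # Join nested keys with "." so {"b": {"be": "world"}} yields {"b.be": "world"}.
    flat = {}
    for key, value in obj.items():
        path = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        else:
            flat[path] = value
    return flat

assert "b.be" in flatten({"a": 1, "b": {"be": "world", "bb": 1.5}})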
10 changes: 3 additions & 7 deletions docs/scripts/build_database_and_plots.py
@@ -5,8 +5,7 @@
import subprocess

from orion.client import get_experiment
from orion.core.utils.singleton import update_singletons
from orion.storage.base import get_storage, setup_storage
from orion.storage.base import setup_storage

ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
DOC_SRC_DIR = os.path.join(ROOT_DIR, "..", "src")
@@ -69,9 +68,8 @@ def prepare_dbs():

def setup_tmp_storage(host):
# Clear singletons
update_singletons()

setup_storage(
return setup_storage(
storage={
"type": "legacy",
"database": {
@@ -81,8 +79,6 @@ def setup_tmp_storage(host):
}
)

return get_storage()


def load_data(host):
print("Loading data from", host)
@@ -102,7 +98,7 @@ def copy_data(data, host=TMP_DB_HOST):
storage = setup_tmp_storage(host)
for exp_id, experiment in data["experiments"].items():
del experiment["_id"]
storage.create_experiment(experiment)
storage.create_experiment(experiment, storage=storage)
assert exp_id != experiment["_id"]
trials = []
for trial in data["trials"][exp_id]:
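The hunks above drop the singleton helpers (update_singletons, get_storage) in favour of using the value returned by setup_storage directly. A minimal sketch of the new pattern, assuming a hypothetical pickle path:

from orion.storage.base import setup_storage

# setup_storage() now returns the storage instance; there is no longer a
# singleton to clear beforehand or to fetch afterwards with get_storage().
storage = setup_storage(
    storage={
        "type": "legacy",
        "database": {"type": "pickleddb", "host": "./tmp_db.pkl"},  # hypothetical path
    }
)

# The returned object exposes the usual protocol methods, e.g. listing experiments.
print(storage.fetch_experiments({}))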
6 changes: 2 additions & 4 deletions docs/scripts/filter_database.py
@@ -5,11 +5,11 @@
import shutil

from orion.core.io.orion_cmdline_parser import OrionCmdlineParser
from orion.storage.base import get_storage, setup_storage
from orion.storage.base import setup_storage

shutil.copy("./examples/plotting/database.pkl", "./examples/base_db.pkl")

setup_storage(
storage = setup_storage(
dict(
type="legacy",
database=dict(type="pickleddb", host="./examples/base_db.pkl"),
@@ -23,8 +23,6 @@
("lateral-view-multitask3", 1): "3-dim-cat-shape-exp",
}

storage = get_storage()


def update_dropout(experiment_config):
metadata = experiment_config["metadata"]
13 changes: 9 additions & 4 deletions examples/benchmark/profet_benchmark.py
@@ -22,6 +22,7 @@
ProfetXgBoostTask,
)
from orion.benchmark.task.profet.profet_task import MetaModelConfig, ProfetTask
from orion.storage.base import setup_storage

try:
from simple_parsing.helpers import choice
@@ -103,7 +104,15 @@ def main(config: ProfetExperimentConfig):

print(f"Storage file used: {config.storage_pickle_path}")

storage = setup_storage(
{
"type": "legacy",
"database": {"type": "pickleddb", "host": str(config.storage_pickle_path)},
}
)

benchmark = get_or_create_benchmark(
storage,
name=config.name,
algorithms=config.algorithms,
targets=[
@@ -114,10 +123,6 @@
],
}
],
storage={
"type": "legacy",
"database": {"type": "pickleddb", "host": str(config.storage_pickle_path)},
},
debug=config.debug,
)
benchmark.setup_studies()
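After this change, get_or_create_benchmark receives the storage instance as its first argument instead of a storage=... config dict. A minimal sketch of the new calling convention, using the built-in RosenBrock task and AverageResult assessment purely for illustration (the benchmark name and database path are made up):

from orion.benchmark.assessment import AverageResult
from orion.benchmark.benchmark_client import get_or_create_benchmark
from orion.benchmark.task import RosenBrock
from orion.storage.base import setup_storage

# Build the storage first, then hand the instance to the benchmark client.
storage = setup_storage(
    {"type": "legacy", "database": {"type": "pickleddb", "host": "./bench_db.pkl"}}
)

benchmark = get_or_create_benchmark(
    storage,
    name="rosenbrock_demo",
    algorithms=["random"],
    targets=[
        {
            "assess": [AverageResult(2)],
            "task": [RosenBrock(25, dim=3)],
        }
    ],
)
benchmark.process()  # run the studies, unchanged by this refactor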
22 changes: 17 additions & 5 deletions src/orion/benchmark/__init__.py
@@ -12,6 +12,7 @@
import orion.core
from orion.client import create_experiment
from orion.executor.base import executor_factory
from orion.storage.base import BaseStorageProtocol


class Benchmark:
@@ -20,8 +21,12 @@ class Benchmark:
Parameters
----------
storage: Storage
Instance of the storage to use
name: str
Name of the benchmark
algorithms: list, optional
Algorithms used for the benchmark; each algorithm can be given in the formats below:
@@ -49,19 +54,26 @@ class Benchmark:
task: list
Task objects
storage: dict, optional
Configuration of the storage backend.
executor: `orion.executor.base.BaseExecutor`, optional
Executor to run the benchmark experiments
"""

def __init__(self, name, algorithms, targets, storage=None, executor=None):
def __init__(
self,
storage,
name,
algorithms,
targets,
executor=None,
):
assert isinstance(storage, BaseStorageProtocol)

self._id = None
self.name = name
self.algorithms = algorithms
self.targets = targets
self.metadata = {}
self.storage_config = storage
self.storage = storage
self._executor = executor
self._executor_owner = False

@@ -353,7 +365,7 @@ def setup_experiments(self):
space=space,
algorithms=algorithm.experiment_algorithm,
max_trials=max_trials,
storage=self.benchmark.storage_config,
storage=self.benchmark.storage,
executor=executor,
)
self.experiments_info.append((task_index, experiment))
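Benchmark now keeps the BaseStorageProtocol instance it is given (the constructor asserts the type) and forwards it to create_experiment, as setup_experiments does above with self.benchmark.storage. A minimal sketch of passing a storage instance straight to create_experiment; the experiment name, search space, and path are illustrative only:

from orion.client import create_experiment
from orion.storage.base import setup_storage

storage = setup_storage(
    {"type": "legacy", "database": {"type": "pickleddb", "host": "./demo_db.pkl"}}
)

# create_experiment() accepts the storage object itself rather than a config dict.
experiment = create_experiment(
    name="demo_exp",
    space={"x": "uniform(0, 1)"},
    max_trials=5,
    storage=storage,
)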
50 changes: 27 additions & 23 deletions src/orion/benchmark/benchmark_client.py
@@ -11,19 +11,24 @@
from orion.benchmark.task.base import bench_task_factory
from orion.core.io.database import DuplicateKeyError
from orion.core.utils.exceptions import NoConfigurationError
from orion.storage.base import get_storage, setup_storage

logger = logging.getLogger(__name__)


def get_or_create_benchmark(
name, algorithms=None, targets=None, storage=None, executor=None, debug=False
storage,
name,
algorithms=None,
targets=None,
executor=None,
):
"""
Create or get a benchmark object.
Parameters
----------
storage: BaseStorageProtocol
Instance of the storage to use
name: str
Name of the benchmark
algorithms: list, optional
@@ -35,30 +40,25 @@ def get_or_create_benchmark(
Assessment objects
task: list
Task objects
storage: dict, optional
Configuration of the storage backend.
executor: `orion.executor.base.BaseExecutor`, optional
Executor to run the benchmark experiments
debug: bool, optional
If using in debug mode, the storage config is overridden with legacy:EphemeralDB.
Defaults to False.
Returns
-------
An instance of `orion.benchmark.Benchmark`
"""
setup_storage(storage=storage, debug=debug)

# fetch benchmark from db
db_config = _fetch_benchmark(name)
db_config = _fetch_benchmark(storage, name)

benchmark_id = None
input_configure = None

if db_config:
if algorithms or targets:
input_benchmark = Benchmark(name, algorithms, targets)
input_benchmark = Benchmark(storage, name, algorithms, targets)
input_configure = input_benchmark.configuration

benchmark_id, algorithms, targets = _resolve_db_config(db_config)

if not algorithms or not targets:
@@ -68,7 +68,11 @@
)

benchmark = _create_benchmark(
name, algorithms, targets, storage=storage, executor=executor
storage,
name,
algorithms,
targets,
executor=executor,
)

if input_configure and input_benchmark.configuration != benchmark.configuration:
@@ -80,7 +84,7 @@
if benchmark_id is None:
logger.debug("Benchmark not found in DB. Now attempting registration in DB.")
try:
_register_benchmark(benchmark)
_register_benchmark(storage, benchmark)
logger.debug("Benchmark successfully registered in DB.")
except DuplicateKeyError:
logger.info(
@@ -89,7 +93,11 @@
)
benchmark.close()
benchmark = get_or_create_benchmark(
name, algorithms, targets, storage, executor, debug
storage,
name,
algorithms,
targets,
executor,
)

return benchmark
@@ -132,9 +140,9 @@ def _resolve_db_config(db_config):
return benchmark_id, algorithms, targets


def _create_benchmark(name, algorithms, targets, storage, executor):
def _create_benchmark(storage, name, algorithms, targets, executor):

benchmark = Benchmark(name, algorithms, targets, storage, executor)
benchmark = Benchmark(storage, name, algorithms, targets, executor)
benchmark.setup_studies()

return benchmark
@@ -147,22 +155,18 @@ def _create_study(benchmark, algorithms, assess, task):
return study


def _fetch_benchmark(name):

if name:
configs = get_storage().fetch_benchmark({"name": name})
else:
configs = get_storage().fetch_benchmark({})
def _fetch_benchmark(storage, name):
configs = storage.fetch_benchmark({"name": name})

if not configs:
return {}

return configs[0]


def _register_benchmark(benchmark):
def _register_benchmark(storage, benchmark):
benchmark.metadata["datetime"] = datetime.datetime.utcnow()
config = benchmark.configuration
# This will raise DuplicateKeyError if a concurrent experiment with
# identical (name, metadata.user) is written first in the database.
get_storage().create_benchmark(config)
storage.create_benchmark(config)
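With the singleton gone, the client helpers thread the storage through explicitly and call its benchmark methods directly (storage.fetch_benchmark, storage.create_benchmark). A minimal sketch of that access pattern; the benchmark name and document below are made up, and real records come from Benchmark.configuration:

from orion.storage.base import setup_storage

storage = setup_storage(
    {"type": "legacy", "database": {"type": "pickleddb", "host": "./bench_db.pkl"}}
)

# Benchmark records are read and written through the storage instance itself;
# no get_storage() lookup is involved.
configs = storage.fetch_benchmark({"name": "rosenbrock_demo"})
if not configs:
    storage.create_benchmark({"name": "rosenbrock_demo", "algorithms": ["random"]})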