Skip to content

Commit

Permalink
more unit tests
Browse files Browse the repository at this point in the history
  • Loading branch information
ncilfone committed Jul 15, 2021
1 parent a5f1e14 commit 6f4b2e5
Show file tree
Hide file tree
Showing 13 changed files with 263 additions and 7 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/python-coverage.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ jobs:
- uses: actions/cache@v2
with:
path: ${{ env.pythonLocation }}
key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/DEV_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/S3_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/TUNE_REQUIREMENTS.txt') }}
key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/DEV_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/S3_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/TUNE_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/TEST_EXTRAS_REQUIREMENTS.txt') }}

- name: Install dependencies and dev dependencies
run: |
Expand All @@ -33,6 +33,7 @@ jobs:
pip install -r ./requirements/DEV_REQUIREMENTS.txt
pip install -r ./requirements/S3_REQUIREMENTS.txt
pip install -r ./requirements/TUNE_REQUIREMENTS.txt
pip install -r ./requirements/TEST_EXTRAS_REQUIREMENTS.txt
- name: Test with pytest
run: |
Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/python-pytest-tune.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ jobs:
- uses: actions/cache@v2
with:
path: ${{ env.pythonLocation }}
key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/DEV_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/S3_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/TUNE_REQUIREMENTS.txt') }}
key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/DEV_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/S3_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/TUNE_REQUIREMENTS.txt') }}-${{ hashFiles('./requirements/TEST_EXTRAS_REQUIREMENTS.txt') }}

- name: Install dependencies
run: |
Expand All @@ -35,6 +35,7 @@ jobs:
pip install -r ./requirements/DEV_REQUIREMENTS.txt
pip install -r ./requirements/S3_REQUIREMENTS.txt
pip install -r ./requirements/TUNE_REQUIREMENTS.txt
pip install -r ./requirements/TEST_EXTRAS_REQUIREMENTS.txt
- name: Test with pytest
run: |
Expand Down
1 change: 0 additions & 1 deletion examples/tune/optuna/tune.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,6 @@ def main():
# Pull the study and trials object out of the return dictionary and pass it to the tell call using the study
# object
tuner_status["study"].tell(tuner_status["trial"], val_acc)
print("hi")


if __name__ == "__main__":
Expand Down
1 change: 1 addition & 0 deletions requirements/TEST_EXTRAS_REQUIREMENTS.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
scikit-learn
2 changes: 1 addition & 1 deletion spock/addons/tune/tuner.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def __init__(
# Todo: add ax type check here
accept_types = OptunaTunerConfig
if not isinstance(tuner_config, accept_types):
raise ValueError(
raise TypeError(
f"Passed incorrect tuner_config type of {type(tuner_config)} -- must be of type "
f"{repr(accept_types)}"
)
Expand Down
1 change: 0 additions & 1 deletion tests/base/test_writers.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,6 @@ def test_yaml_file_writer_save_path(self, monkeypatch):
# Test the chained version
now = datetime.datetime.now()
curr_int_time = int(f'{now.year}{now.month}{now.day}{now.hour}{now.second}')

config_values = config.save(file_extension='.yaml', file_name=f'pytest.{curr_int_time}').generate()
yaml_regex = re.compile(fr'pytest.{curr_int_time}.'
fr'[a-fA-F0-9]{{8}}-[a-fA-F0-9]{{4}}-[a-fA-F0-9]{{4}}-'
Expand Down
32 changes: 32 additions & 0 deletions tests/conf/yaml/test_hp_cast.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Test conf for all hyper-parameters
HPOne:
  # Range hyper-parameters: int/float bounds with linear and log-scale sampling
  hp_int:
    type: int
    bounds: [ 10, 100 ]
    log_scale: false
  hp_float:
    type: float
    bounds: [ 10.0, 100.0 ]
    log_scale: false
  hp_int_log:
    type: int
    bounds: [ 10, 100 ]
    log_scale: true
  hp_float_log:
    type: float
    bounds: [ 10.0, 100.0 ]
    log_scale: true

HPTwo:
  # NOTE(review): choices below are strings while the declared type is int -- this
  # mismatch is intentional; TestInvalidCastChoice loads this file and expects the
  # cast to raise a TypeError
  hp_choice_int:
    type: int
    choices: ["hello", "ciao", "bonjour" ]
  hp_choice_float:
    type: float
    choices: [ 10.0, 20.0, 40.0, 80.0 ]
  hp_choice_bool:
    type: bool
    choices: [ true, false ]
  hp_choice_str:
    type: str
    choices: [ "hello", "ciao", "bonjour" ]
32 changes: 32 additions & 0 deletions tests/conf/yaml/test_hp_cast_bounds.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Test conf for all hyper-parameters
HPOne:
  hp_int:
    type: int
    bounds: [ 10, 100 ]
    log_scale: false
  hp_float:
    type: float
    bounds: [ 10.0, 100.0 ]
    log_scale: false
  # NOTE(review): string bounds on an int range are intentional -- TestInvalidCastRange
  # loads this file and expects the bounds cast to raise a ValueError
  hp_int_log:
    type: int
    bounds: [ 'foo', 'bar' ]
    log_scale: true
  hp_float_log:
    type: float
    bounds: [ 10.0, 100.0 ]
    log_scale: true

HPTwo:
  # Choice hyper-parameters in this file are all well-formed for their declared types
  hp_choice_int:
    type: int
    choices: [10, 20, 40, 80 ]
  hp_choice_float:
    type: float
    choices: [ 10.0, 20.0, 40.0, 80.0 ]
  hp_choice_bool:
    type: bool
    choices: [ true, false ]
  hp_choice_str:
    type: str
    choices: [ "hello", "ciao", "bonjour" ]
12 changes: 12 additions & 0 deletions tests/conf/yaml/test_optuna.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
###############################
# optuna simple sklearn example
###############################

# Hyper-parameter space for the sklearn LogisticRegression used by TestIrisOptuna
LogisticRegressionHP:
  c:
    # sampled on a log scale and passed as LogisticRegression(C=...)
    type: float
    bounds: [1E-07, 10.0]
    log_scale: true
  solver:
    type: str
    choices: ["lbfgs", "saga"]
6 changes: 6 additions & 0 deletions tests/tune/attr_configs_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,3 +19,9 @@ class HPTwo:
hp_choice_float: ChoiceHyperParameter
hp_choice_bool: ChoiceHyperParameter
hp_choice_str: ChoiceHyperParameter


@spockTuner
class LogisticRegressionHP:
    # Hyper-parameter space for the sklearn LogisticRegression exercised by TestIrisOptuna;
    # values are supplied by ./tests/conf/yaml/test_optuna.yaml
    c: RangeHyperParameter  # log-scale float range, passed as LogisticRegression(C=...)
    solver: ChoiceHyperParameter  # categorical solver name ("lbfgs" / "saga")
72 changes: 72 additions & 0 deletions tests/tune/test_cmd_line.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
from tests.tune.attr_configs_test import *
import pytest
import sys
from spock.builder import ConfigArgBuilder
from spock.addons.tune import OptunaTunerConfig


class TestOptunaCmdLineOverride:
    """Verify that command-line flags override the hyper-parameter definitions from the YAML config."""

    @staticmethod
    @pytest.fixture
    def arg_builder(monkeypatch):
        # Fake sys.argv so the builder parses our override flags instead of pytest's own args.
        # Bounds/choices for most parameters are overridden on the command line; the values in
        # test_hp.yaml should be replaced by these.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', ['', '--config',
                                    './tests/conf/yaml/test_hp.yaml',
                                    '--HPOne.hp_int.bounds', '(1, 1000)',
                                    '--HPOne.hp_int_log.bounds', '(1, 1000)',
                                    '--HPOne.hp_float.bounds', '(1.0, 1000.0)',
                                    '--HPOne.hp_float_log.bounds', '(1.0, 1000.0)',
                                    '--HPTwo.hp_choice_int.choices', '[1, 2, 4, 8]',
                                    '--HPTwo.hp_choice_float.choices', '[1.0, 2.0, 4.0, 8.0]',
                                    '--HPTwo.hp_choice_str.choices', "['is', 'it ', 'me', 'youre', 'looking', 'for']"
                                    ])
            optuna_config = OptunaTunerConfig(study_name="Tests", direction="maximize")
            config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
            return config

    def test_hp_one(self, arg_builder):
        # Range hyper-parameters must carry the overridden bounds while keeping their
        # declared type and log_scale flag. NOTE(review): _tune_namespace is an internal
        # attribute of the builder -- these asserts are white-box by design.
        assert arg_builder._tune_namespace.HPOne.hp_int.bounds == (1, 1000)
        assert arg_builder._tune_namespace.HPOne.hp_int.type == 'int'
        assert arg_builder._tune_namespace.HPOne.hp_int.log_scale is False
        assert arg_builder._tune_namespace.HPOne.hp_int_log.bounds == (1, 1000)
        assert arg_builder._tune_namespace.HPOne.hp_int_log.type == 'int'
        assert arg_builder._tune_namespace.HPOne.hp_int_log.log_scale is True
        assert arg_builder._tune_namespace.HPOne.hp_float.bounds == (1.0, 1000.0)
        assert arg_builder._tune_namespace.HPOne.hp_float.type == 'float'
        assert arg_builder._tune_namespace.HPOne.hp_float.log_scale is False
        assert arg_builder._tune_namespace.HPOne.hp_float_log.bounds == (1.0, 1000.0)
        assert arg_builder._tune_namespace.HPOne.hp_float_log.type == 'float'
        assert arg_builder._tune_namespace.HPOne.hp_float_log.log_scale is True

    def test_hp_two(self, arg_builder):
        # Choice hyper-parameters: overridden choice lists must be parsed into typed values.
        # hp_choice_bool was not overridden on the command line, so it keeps the YAML values.
        assert arg_builder._tune_namespace.HPTwo.hp_choice_int.type == 'int'
        assert arg_builder._tune_namespace.HPTwo.hp_choice_int.choices == [1, 2, 4, 8]
        assert arg_builder._tune_namespace.HPTwo.hp_choice_float.type == 'float'
        assert arg_builder._tune_namespace.HPTwo.hp_choice_float.choices == [1.0, 2.0, 4.0, 8.0]
        assert arg_builder._tune_namespace.HPTwo.hp_choice_bool.type == 'bool'
        assert arg_builder._tune_namespace.HPTwo.hp_choice_bool.choices == [True, False]
        assert arg_builder._tune_namespace.HPTwo.hp_choice_str.type == 'str'
        assert arg_builder._tune_namespace.HPTwo.hp_choice_str.choices == ['is', 'it ', 'me', 'youre', 'looking', 'for']

    def test_sampling(self, arg_builder):
        # Draw 100 random samples and make sure all fall within all of the bounds or sets
        for _ in range(100):
            hp_attrs = arg_builder.sample()
            assert 1 <= hp_attrs.HPOne.hp_int <= 1000
            assert isinstance(hp_attrs.HPOne.hp_int, int) is True
            assert 1 <= hp_attrs.HPOne.hp_int_log <= 1000
            assert isinstance(hp_attrs.HPOne.hp_int_log, int) is True
            assert 1.0 <= hp_attrs.HPOne.hp_float <= 1000.0
            assert isinstance(hp_attrs.HPOne.hp_float, float) is True
            assert 1.0 <= hp_attrs.HPOne.hp_float_log <= 1000.0
            assert isinstance(hp_attrs.HPOne.hp_float_log, float) is True
            assert hp_attrs.HPTwo.hp_choice_int in [1, 2, 4, 8]
            assert isinstance(hp_attrs.HPTwo.hp_choice_int, int) is True
            assert hp_attrs.HPTwo.hp_choice_float in [1.0, 2.0, 4.0, 8.0]
            assert isinstance(hp_attrs.HPTwo.hp_choice_float, float) is True
            assert hp_attrs.HPTwo.hp_choice_bool in [True, False]
            assert isinstance(hp_attrs.HPTwo.hp_choice_bool, bool) is True
            assert hp_attrs.HPTwo.hp_choice_str in ['is', 'it ', 'me', 'youre', 'looking', 'for']
            assert isinstance(hp_attrs.HPTwo.hp_choice_str, str) is True
69 changes: 67 additions & 2 deletions tests/tune/test_optuna.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,16 @@
# -*- coding: utf-8 -*-
import datetime
from tests.tune.base_asserts_test import *
from tests.tune.attr_configs_test import *
import pytest
import os
import re
import sys
from spock.builder import ConfigArgBuilder
from spock.addons.tune import OptunaTunerConfig
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split


class TestOptunaBasic(AllTypes):
Expand All @@ -14,7 +20,7 @@ def arg_builder(monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', ['', '--config',
'./tests/conf/yaml/test_hp.yaml'])
optuna_config = OptunaTunerConfig(study_name="Tests", direction="maximize")
optuna_config = OptunaTunerConfig(study_name="Basic Tests", direction="maximize")
config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
return config

Expand All @@ -26,6 +32,65 @@ def arg_builder(monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', ['', '--config',
'./tests/conf/yaml/test_hp.yaml'])
optuna_config = OptunaTunerConfig(study_name="Tests", direction="maximize")
optuna_config = OptunaTunerConfig(study_name="Sample Tests", direction="maximize")
config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
return config


class TestIrisOptuna:
    """End-to-end optuna define-and-run loop: sample spock hyper-parameters, fit a
    LogisticRegression on iris, report the score back to the study, and check that
    each sampled draw was written to a config file in /tmp."""

    @staticmethod
    @pytest.fixture
    def arg_builder(monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', ['', '--config',
                                    './tests/conf/yaml/test_optuna.yaml'])
            # Optuna config -- this will internally spawn the study object for the define-and-run style which will be returned
            # as part of the call to sample()
            optuna_config = OptunaTunerConfig(
                study_name="Iris Logistic Regression Tests", direction="maximize"
            )
            config = ConfigArgBuilder(LogisticRegressionHP).tuner(optuna_config)
            return config

    def test_iris(self, arg_builder):
        # Load the iris data
        X, y = load_iris(return_X_y=True)
        # Split the Iris data
        X_train, X_valid, y_train, y_valid = train_test_split(X, y)

        # Now we iterate through a bunch of optuna trials
        for _ in range(10):
            # The crux of spock support -- call save w/ the add_tuner_sample flag to write the current draw to file and
            # then call save to return the composed Spockspace of the fixed parameters and the sampled parameters
            # Under the hood spock uses the define-and-run Optuna interface -- thus it handled the underlying 'ask' call
            # and returns the necessary trial object in the return dictionary to call 'tell' with the study object
            now = datetime.datetime.now()
            curr_int_time = int(f'{now.year}{now.month}{now.day}{now.hour}{now.second}')
            hp_attrs = arg_builder.save(
                add_tuner_sample=True, user_specified_path="/tmp", file_name=f'pytest.{curr_int_time}',
            ).sample()
            # Use the currently sampled parameters in a simple LogisticRegression from sklearn
            clf = LogisticRegression(
                C=hp_attrs.LogisticRegressionHP.c,
                solver=hp_attrs.LogisticRegressionHP.solver,
            )
            clf.fit(X_train, y_train)
            val_acc = clf.score(X_valid, y_valid)
            # Get the status of the tuner -- this dict will contain all the objects needed to update
            tuner_status = arg_builder.tuner_status
            # Pull the study and trials object out of the return dictionary and pass it to the tell call using the study
            # object
            tuner_status["study"].tell(tuner_status["trial"], val_acc)

        # NOTE(review): indentation reconstructed from an unindented scrape -- the file check
        # below is assumed to run once after the loop (using the last iteration's timestamp);
        # confirm against the repository. Only the first matching file is asserted and removed.
        yaml_regex = re.compile(fr'pytest.{curr_int_time}.hp.sample.[0-9]+.'
                                fr'[a-fA-F0-9]{{8}}-[a-fA-F0-9]{{4}}-[a-fA-F0-9]{{4}}-'
                                fr'[a-fA-F0-9]{{4}}-[a-fA-F0-9]{{12}}.spock.cfg.yaml')
        matches = [re.fullmatch(yaml_regex, val) for val in os.listdir('/tmp')
                   if re.fullmatch(yaml_regex, val) is not None]
        fname = f'/tmp/{matches[0].string}'
        assert os.path.exists(fname)
        with open(fname, 'r') as fin:
            print(fin.read())
        # Clean up if assert is good
        if os.path.exists(fname):
            os.remove(fname)
36 changes: 36 additions & 0 deletions tests/tune/test_raises.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
import sys

import optuna
import pytest

from spock.addons.tune import OptunaTunerConfig
from spock.builder import ConfigArgBuilder
from tests.tune.attr_configs_test import *


class TestIncorrectTunerConfig:
    """Handing .tuner() anything other than a supported tuner config must raise TypeError."""

    def test_incorrect_tuner_config(self, monkeypatch):
        argv = ['', '--config', './tests/conf/yaml/test_hp.yaml']
        with monkeypatch.context() as patched:
            patched.setattr(sys, 'argv', argv)
            # A raw optuna study is not an OptunaTunerConfig, so the type check should trip
            bad_config = optuna.create_study(study_name="Tests", direction='minimize')
            with pytest.raises(TypeError):
                ConfigArgBuilder(HPOne, HPTwo).tuner(bad_config)


class TestInvalidCastChoice:
    """Choice hyper-parameter values that cannot be cast to the declared type must fail.

    Loads test_hp_cast.yaml, whose HPTwo.hp_choice_int declares ``type: int`` but lists
    string choices, and expects a TypeError from the builder chain.
    """

    def test_invalid_cast_choice(self, monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', ['', '--config',
                                    './tests/conf/yaml/test_hp_cast.yaml'])
            # Fix: use a proper OptunaTunerConfig here. The original passed a raw
            # optuna study, whose wrong-tuner-type TypeError (see tuner.py) could mask
            # the cast failure this test is actually meant to exercise.
            optuna_config = OptunaTunerConfig(study_name="Tests", direction='minimize')
            with pytest.raises(TypeError):
                config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)


class TestInvalidCastRange:
    """Range hyper-parameter bounds that cannot be cast to the declared type must fail.

    Loads test_hp_cast_bounds.yaml, whose HPOne.hp_int_log declares ``type: int`` but
    lists string bounds ('foo', 'bar'), and expects a ValueError from the builder chain.
    """

    def test_invalid_cast_range(self, monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', ['', '--config',
                                    './tests/conf/yaml/test_hp_cast_bounds.yaml'])
            # Fix: use a proper OptunaTunerConfig here. The original passed a raw optuna
            # study; .tuner()'s type check raises TypeError for that, which can never be
            # the ValueError this test expects -- the test must only be able to fail/pass
            # on the bounds-cast path.
            optuna_config = OptunaTunerConfig(study_name="Tests", direction='minimize')
            with pytest.raises(ValueError):
                config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)

0 comments on commit 6f4b2e5

Please sign in to comment.