Skip to content

Commit

Permalink
NNICTL set classArgs as optional (#374)
Browse files Browse the repository at this point in the history
In nnictl, classArgs is not required; it is now set as optional because some kinds of tuners and assessors may not require classArgs.
  • Loading branch information
SparkSnail committed Nov 23, 2018
1 parent 28e26ae commit 851955e
Show file tree
Hide file tree
Showing 6 changed files with 26 additions and 40 deletions.
23 changes: 0 additions & 23 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
@@ -1,28 +1,5 @@
jobs:

- job: 'Install_through_pip'
pool:
vmImage: 'Ubuntu 16.04'
strategy:
matrix:
Python36:
PYTHON_VERSION: '3.6'

steps:
- script: python3 -m pip install --upgrade pip setuptools
displayName: 'Install python tools'
- script: |
python3 -m pip install nni --user
displayName: 'Install nni toolkit via pip'
- script: |
cd test
PATH=$HOME/.local/bin:$PATH python3 naive_test.py
displayName: 'Integration tests'
- script: |
cd test
PATH=$HOME/.local/bin:$PATH python3 sdk_test.py
displayName: 'Built-in tuner tests'
- job: 'Install_through_source_code'
pool:
vmImage: 'Ubuntu 16.04'
Expand Down
4 changes: 2 additions & 2 deletions docs/ExperimentConfig.md
Original file line number Diff line number Diff line change
Expand Up @@ -168,10 +168,10 @@ machineList:
* __builtinTunerName__ and __classArgs__
* __builtinTunerName__

__builtinTunerName__ specifies the name of system tuner you want to use, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__}
__builtinTunerName__ specifies the name of system tuner you want to use, nni sdk provides six kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__, __BatchTuner__, __GridSearch__}
* __classArgs__

__classArgs__ specifies the arguments of tuner algorithm
__classArgs__ specifies the arguments of the tuner algorithm. If __builtinTunerName__ is one of {__TPE__, __Random__, __Anneal__, __Evolution__}, you should set __optimize_mode__.
* __codeDir__, __classFileName__, __className__ and __classArgs__
* __codeDir__

Expand Down
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/batch_tuner/batch_tuner.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ class BatchTuner(Tuner):
}
'''

def __init__(self, optimize_mode):
def __init__(self):
self.count = -1
self.values = []

Expand Down
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/gridsearch_tuner/gridsearch_tuner.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ class GridSearchTuner(Tuner):
and sample and then change the sampled value back.
'''

def __init__(self, optimize_mode):
def __init__(self):
self.count = -1
self.expanded_search_space = []

Expand Down
17 changes: 11 additions & 6 deletions test/sdk_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
RED = '\33[31m'
CLEAR = '\33[0m'

TUNER_LIST = ['BatchTuner', 'TPE', 'Random', 'Anneal', 'Evolution']
TUNER_LIST = ['GridSearch', 'BatchTuner', 'TPE', 'Random', 'Anneal', 'Evolution']
ASSESSOR_LIST = ['Medianstop']
EXPERIMENT_URL = 'http://localhost:8080/api/v1/nni/experiment'

Expand All @@ -38,12 +38,17 @@ def switch(dispatch_type, dispatch_name):
'''Change dispatch in config.yml'''
config_path = 'sdk_test/local.yml'
experiment_config = get_yml_content(config_path)
experiment_config[dispatch_type.lower()] = {
'builtin' + dispatch_type + 'Name': dispatch_name,
'classArgs': {
'optimize_mode': 'maximize'
if dispatch_name in ['GridSearch', 'BatchTuner']:
experiment_config[dispatch_type.lower()] = {
'builtin' + dispatch_type + 'Name': dispatch_name
}
else:
experiment_config[dispatch_type.lower()] = {
'builtin' + dispatch_type + 'Name': dispatch_name,
'classArgs': {
'optimize_mode': 'maximize'
}
}
}
dump_yml_content(config_path, experiment_config)

def test_builtin_dispatcher(dispatch_type, dispatch_name):
Expand Down
18 changes: 11 additions & 7 deletions tools/nni_cmd/config_schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,13 @@
Optional('multiThread'): bool,
'useAnnotation': bool,
'tuner': Or({
'builtinTunerName': Or('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch'),
'classArgs': {
'optimize_mode': Or('maximize', 'minimize'),
Optional('speed'): int
},
'builtinTunerName': Or('TPE', 'Random', 'Anneal', 'SMAC', 'Evolution'),
Optional('classArgs'): {
'optimize_mode': Or('maximize', 'minimize')
},
Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
},{
'builtinTunerName': Or('BatchTuner', 'GridSearch'),
Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
},{
'codeDir': os.path.exists,
Expand All @@ -49,8 +51,10 @@
}),
Optional('assessor'): Or({
'builtinAssessorName': lambda x: x in ['Medianstop'],
'classArgs': {
'optimize_mode': lambda x: x in ['maximize', 'minimize']},
Optional('classArgs'): {
Optional('optimize_mode'): Or('maximize', 'minimize'),
Optional('start_step'): And(int, lambda x: 0 <= x <= 9999)
},
Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999)
},{
'codeDir': os.path.exists,
Expand Down

0 comments on commit 851955e

Please sign in to comment.