
Commit

Ravin Kohli: Merge pull request #84 from franchuterivera/refactor_development_loggermsg
GitHub Actions committed Feb 22, 2021
1 parent 53e9ddd commit 07d7892
Showing 14 changed files with 75 additions and 112 deletions.
@@ -82,3 +82,4 @@ def get_search_space_updates():
y_pred = api.predict(X_test)
score = api.score(y_pred, y_test)
print(score)
print(api.show_models())
Binary file not shown.
@@ -26,7 +26,7 @@
},
"outputs": [],
"source": [
"import os\nimport tempfile as tmp\nimport warnings\n\nos.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()\nos.environ['OMP_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_NUM_THREADS'] = '1'\n\nwarnings.simplefilter(action='ignore', category=UserWarning)\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport sklearn.datasets\nimport sklearn.model_selection\n\nfrom autoPyTorch.api.tabular_classification import TabularClassificationTask\nfrom autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates\n\n\ndef get_search_space_updates():\n \"\"\"\n Search space updates to the task can be added using HyperparameterSearchSpaceUpdates\n Returns:\n HyperparameterSearchSpaceUpdates\n \"\"\"\n updates = HyperparameterSearchSpaceUpdates()\n updates.append(node_name=\"data_loader\",\n hyperparameter=\"batch_size\",\n value_range=[16, 512],\n default_value=32)\n updates.append(node_name=\"lr_scheduler\",\n hyperparameter=\"CosineAnnealingLR:T_max\",\n value_range=[50, 60],\n default_value=55)\n updates.append(node_name='network_backbone',\n hyperparameter='ResNetBackbone:dropout',\n value_range=[0, 0.5],\n default_value=0.2)\n return updates\n\n\nif __name__ == '__main__':\n ############################################################################\n # Data Loading\n # ============\n X, y = sklearn.datasets.fetch_openml(data_id=40981, return_X_y=True, as_frame=True)\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(\n X,\n y,\n random_state=1,\n )\n\n ############################################################################\n # Build and fit a classifier\n # ==========================\n api = TabularClassificationTask(\n delete_tmp_folder_after_terminate=False,\n search_space_updates=get_search_space_updates()\n )\n api.search(\n X_train=X_train,\n y_train=y_train,\n X_test=X_test.copy(),\n y_test=y_test.copy(),\n optimize_metric='accuracy',\n total_walltime_limit=500,\n func_eval_time_limit=50\n )\n\n ############################################################################\n # Print the final ensemble performance\n # ====================================\n print(api.run_history, api.trajectory)\n y_pred = api.predict(X_test)\n score = api.score(y_pred, y_test)\n print(score)"
"import os\nimport tempfile as tmp\nimport warnings\n\nos.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()\nos.environ['OMP_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_NUM_THREADS'] = '1'\n\nwarnings.simplefilter(action='ignore', category=UserWarning)\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport sklearn.datasets\nimport sklearn.model_selection\n\nfrom autoPyTorch.api.tabular_classification import TabularClassificationTask\nfrom autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates\n\n\ndef get_search_space_updates():\n \"\"\"\n Search space updates to the task can be added using HyperparameterSearchSpaceUpdates\n Returns:\n HyperparameterSearchSpaceUpdates\n \"\"\"\n updates = HyperparameterSearchSpaceUpdates()\n updates.append(node_name=\"data_loader\",\n hyperparameter=\"batch_size\",\n value_range=[16, 512],\n default_value=32)\n updates.append(node_name=\"lr_scheduler\",\n hyperparameter=\"CosineAnnealingLR:T_max\",\n value_range=[50, 60],\n default_value=55)\n updates.append(node_name='network_backbone',\n hyperparameter='ResNetBackbone:dropout',\n value_range=[0, 0.5],\n default_value=0.2)\n return updates\n\n\nif __name__ == '__main__':\n ############################################################################\n # Data Loading\n # ============\n X, y = sklearn.datasets.fetch_openml(data_id=40981, return_X_y=True, as_frame=True)\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(\n X,\n y,\n random_state=1,\n )\n\n ############################################################################\n # Build and fit a classifier\n # ==========================\n api = TabularClassificationTask(\n delete_tmp_folder_after_terminate=False,\n search_space_updates=get_search_space_updates()\n )\n api.search(\n X_train=X_train,\n y_train=y_train,\n X_test=X_test.copy(),\n y_test=y_test.copy(),\n optimize_metric='accuracy',\n total_walltime_limit=500,\n func_eval_time_limit=50\n )\n\n ############################################################################\n # Print the final ensemble performance\n # ====================================\n print(api.run_history, api.trajectory)\n y_pred = api.predict(X_test)\n score = api.score(y_pred, y_test)\n print(score)\n print(api.show_models())"
]
}
],
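For readability, here is the notebook cell above unescaped into plain Python. It reproduces the escaped source string (reflowed, with brief comments added) and ends with the `print(api.show_models())` call that this commit adds:

.. code-block:: python

    import os
    import tempfile as tmp
    import warnings

    os.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['OPENBLAS_NUM_THREADS'] = '1'
    os.environ['MKL_NUM_THREADS'] = '1'

    warnings.simplefilter(action='ignore', category=UserWarning)
    warnings.simplefilter(action='ignore', category=FutureWarning)

    import sklearn.datasets
    import sklearn.model_selection

    from autoPyTorch.api.tabular_classification import TabularClassificationTask
    from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates


    def get_search_space_updates():
        """Restrict the default configuration space before the search starts."""
        updates = HyperparameterSearchSpaceUpdates()
        updates.append(node_name="data_loader", hyperparameter="batch_size",
                       value_range=[16, 512], default_value=32)
        updates.append(node_name="lr_scheduler", hyperparameter="CosineAnnealingLR:T_max",
                       value_range=[50, 60], default_value=55)
        updates.append(node_name="network_backbone", hyperparameter="ResNetBackbone:dropout",
                       value_range=[0, 0.5], default_value=0.2)
        return updates


    if __name__ == '__main__':
        # Load OpenML dataset 40981 and split it into train and test sets.
        X, y = sklearn.datasets.fetch_openml(data_id=40981, return_X_y=True, as_frame=True)
        X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
            X, y, random_state=1)

        # Build the task with the restricted search space and run the search.
        api = TabularClassificationTask(
            delete_tmp_folder_after_terminate=False,
            search_space_updates=get_search_space_updates())
        api.search(X_train=X_train, y_train=y_train,
                   X_test=X_test.copy(), y_test=y_test.copy(),
                   optimize_metric='accuracy',
                   total_walltime_limit=500, func_eval_time_limit=50)

        # Report the search trajectory and the final ensemble's performance.
        print(api.run_history, api.trajectory)
        y_pred = api.predict(X_test)
        score = api.score(y_pred, y_test)
        print(score)
        print(api.show_models())  # the line added by this commit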
Binary file not shown.
@@ -77,21 +77,23 @@ Image Classification
Pipeline Random Config:
________________________________________
Configuration:
image_augmenter:GaussianBlur:use_augmenter, Value: False
image_augmenter:GaussianBlur:sigma_min, Value: 0.8480354370981031
image_augmenter:GaussianBlur:sigma_offset, Value: 1.8662941407999276
image_augmenter:GaussianBlur:use_augmenter, Value: True
image_augmenter:GaussianNoise:use_augmenter, Value: False
image_augmenter:RandomAffine:use_augmenter, Value: False
image_augmenter:RandomCutout:p, Value: 0.5586693024569416
image_augmenter:RandomCutout:p, Value: 0.3656551312711466
image_augmenter:RandomCutout:use_augmenter, Value: True
image_augmenter:Resize:use_augmenter, Value: False
image_augmenter:ZeroPadAndCrop:percent, Value: 0.4581846771624755
normalizer:__choice__, Value: 'ImageNormalizer'
image_augmenter:ZeroPadAndCrop:percent, Value: 0.4929042351991033
normalizer:__choice__, Value: 'NoNormalizer'

Fitting the pipeline...
________________________________________
ImageClassificationPipeline
________________________________________
0-) normalizer:
ImageNormalizer
NoNormalizer

1-) preprocessing:
EarlyPreprocessing
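Both the "Pipeline Random Config" block and the printed pipeline steps above come from ConfigSpace, the library Auto-PyTorch builds its search spaces on: a Configuration is sampled at random and its repr is the name/Value listing shown. A self-contained toy sketch of that mechanism follows; the hyperparameter names and ranges are illustrative stand-ins, not Auto-PyTorch's actual image-classification space.

.. code-block:: python

    # ConfigSpace API as used by Auto-PyTorch at the time (0.4.x line).
    import ConfigSpace as CS
    import ConfigSpace.hyperparameters as CSH

    cs = CS.ConfigurationSpace(seed=1)
    cs.add_hyperparameters([
        # Illustrative stand-ins for the real image-classification space:
        CSH.CategoricalHyperparameter('normalizer:__choice__',
                                      ['ImageNormalizer', 'NoNormalizer']),
        CSH.CategoricalHyperparameter('image_augmenter:RandomCutout:use_augmenter',
                                      [True, False]),
        CSH.UniformFloatHyperparameter('image_augmenter:RandomCutout:p', 0.2, 1.0),
    ])

    config = cs.sample_configuration()  # one random Configuration
    print(config)  # older ConfigSpace versions print the "name, Value: ..." layout above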
@@ -163,7 +165,7 @@ Image Classification
.. rst-class:: sphx-glr-timing

**Total running time of the script:** ( 0 minutes 5.798 seconds)
**Total running time of the script:** ( 0 minutes 8.334 seconds)


.. _sphx_glr_download_examples_example_image_classification.py:
@@ -25,7 +25,7 @@ Tabular Classification
The following example shows how to fit a sample classification model
with AutoPyTorch

.. GENERATED FROM PYTHON SOURCE LINES 9-85
.. GENERATED FROM PYTHON SOURCE LINES 9-86
@@ -36,7 +36,7 @@ with AutoPyTorch

.. code-block:: none
<smac.runhistory.runhistory.RunHistory object at 0x7f3fac518ee0> [TrajEntry(train_perf=2147483648, incumbent_id=1, incumbent=Configuration:
<smac.runhistory.runhistory.RunHistory object at 0x7f66907c4640> [TrajEntry(train_perf=2147483648, incumbent_id=1, incumbent=Configuration:
data_loader:batch_size, Value: 32
encoder:__choice__, Value: 'OneHotEncoder'
feature_preprocessor:__choice__, Value: 'NoFeaturePreprocessor'
@@ -67,7 +67,7 @@ with AutoPyTorch
scaler:__choice__, Value: 'StandardScaler'
trainer:StandardTrainer:weighted_loss, Value: True
trainer:__choice__, Value: 'StandardTrainer'
, ta_runs=0, ta_time_used=0.0, wallclock_time=0.001955747604370117, budget=0), TrajEntry(train_perf=0.1578947368421053, incumbent_id=1, incumbent=Configuration:
, ta_runs=0, ta_time_used=0.0, wallclock_time=0.002040863037109375, budget=0), TrajEntry(train_perf=0.14619883040935677, incumbent_id=1, incumbent=Configuration:
data_loader:batch_size, Value: 32
encoder:__choice__, Value: 'OneHotEncoder'
feature_preprocessor:__choice__, Value: 'NoFeaturePreprocessor'
@@ -98,43 +98,21 @@ with AutoPyTorch
scaler:__choice__, Value: 'StandardScaler'
trainer:StandardTrainer:weighted_loss, Value: True
trainer:__choice__, Value: 'StandardTrainer'
, ta_runs=1, ta_time_used=4.385937213897705, wallclock_time=5.783432245254517, budget=5.555555555555555), TrajEntry(train_perf=0.15204678362573099, incumbent_id=2, incumbent=Configuration:
data_loader:batch_size, Value: 426
encoder:__choice__, Value: 'OrdinalEncoder'
feature_preprocessor:Nystroem:coef0, Value: -0.4848435864342966
feature_preprocessor:Nystroem:kernel, Value: 'sigmoid'
feature_preprocessor:Nystroem:n_components, Value: 4
feature_preprocessor:__choice__, Value: 'Nystroem'
imputer:categorical_strategy, Value: 'constant_!missing!'
imputer:numerical_strategy, Value: 'most_frequent'
lr_scheduler:CosineAnnealingLR:T_max, Value: 56
lr_scheduler:__choice__, Value: 'CosineAnnealingLR'
network_backbone:ShapedMLPBackbone:activation, Value: 'tanh'
network_backbone:ShapedMLPBackbone:max_units, Value: 999
network_backbone:ShapedMLPBackbone:mlp_shape, Value: 'triangle'
network_backbone:ShapedMLPBackbone:num_groups, Value: 11
network_backbone:ShapedMLPBackbone:output_dim, Value: 477
network_backbone:ShapedMLPBackbone:use_dropout, Value: False
network_backbone:__choice__, Value: 'ShapedMLPBackbone'
network_head:__choice__, Value: 'fully_connected'
network_head:fully_connected:activation, Value: 'tanh'
network_head:fully_connected:num_layers, Value: 4
network_head:fully_connected:units_layer_1, Value: 189
network_head:fully_connected:units_layer_2, Value: 259
network_head:fully_connected:units_layer_3, Value: 494
network_init:OrthogonalInit:bias_strategy, Value: 'Zero'
network_init:__choice__, Value: 'OrthogonalInit'
optimizer:AdamOptimizer:beta1, Value: 0.8999825268789966
optimizer:AdamOptimizer:beta2, Value: 0.9200091936462466
optimizer:AdamOptimizer:lr, Value: 0.0006438744148679775
optimizer:AdamOptimizer:weight_decay, Value: 0.03262472357115608
optimizer:__choice__, Value: 'AdamOptimizer'
scaler:__choice__, Value: 'NoScaler'
trainer:MixUpTrainer:alpha, Value: 0.08759596707798334
trainer:MixUpTrainer:weighted_loss, Value: False
trainer:__choice__, Value: 'MixUpTrainer'
, ta_runs=33, ta_time_used=385.77361822128296, wallclock_time=484.24447774887085, budget=50.0)]
, ta_runs=1, ta_time_used=4.407814264297485, wallclock_time=5.844383239746094, budget=5.555555555555555)]
{'accuracy': 0.861271676300578}
| | Preprocessing | Estimator | Weight |
|---:|:------------------------------------------------------------------|:------------------------------------------------------|---------:|
| 0 | None | RFClassifier | 0.26 |
| 1 | None | ExtraTreesClassifier | 0.22 |
| 2 | SimpleImputer,OrdinalEncoder,NoScaler,Nystroem | ShapedMLPBackbone,FullyConnectedHead,nn.Sequential | 0.14 |
| 3 | SimpleImputer,OneHotEncoder,StandardScaler,NoFeaturePreprocessing | ShapedMLPBackbone,FullyConnectedHead,nn.Sequential | 0.14 |
| 4 | SimpleImputer,OrdinalEncoder,Normalizer,PowerTransformer | MLPBackbone,FullyConnectedHead,nn.Sequential | 0.06 |
| 5 | None | SVC | 0.06 |
| 6 | None | KNNClassifier | 0.04 |
| 7 | SimpleImputer,OrdinalEncoder,Normalizer,PowerTransformer | MLPBackbone,FullyConnectedHead,nn.Sequential | 0.02 |
| 8 | SimpleImputer,OrdinalEncoder,MinMaxScaler,KitchenSink | ShapedResNetBackbone,FullyConnectedHead,nn.Sequential | 0.02 |
| 9 | SimpleImputer,OrdinalEncoder,MinMaxScaler,KitchenSink | ShapedResNetBackbone,FullyConnectedHead,nn.Sequential | 0.02 |
| 10 | SimpleImputer,OneHotEncoder,StandardScaler,NoFeaturePreprocessing | ShapedMLPBackbone,FullyConnectedHead,nn.Sequential | 0.02 |
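The table above is the string returned by the newly added `api.show_models()` call: one row per member of the final ensemble, with its preprocessing steps, estimator, and ensemble weight (the weights sum to 1.0). Continuing from the full script sketched earlier (so `api`, `X_test`, and `y_test` are already defined), the tail of the example that produces this output is:

.. code-block:: python

    y_pred = api.predict(X_test)
    print(api.score(y_pred, y_test))   # -> {'accuracy': 0.861...}
    print(api.show_models())           # -> the ensemble table shown above

    # The trajectory printed further up is a list of TrajEntry records; assuming
    # they expose their fields as attributes (as their repr suggests), one can do:
    for entry in api.trajectory:
        print(entry.train_perf, entry.budget)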
@@ -221,11 +199,12 @@ with AutoPyTorch
y_pred = api.predict(X_test)
score = api.score(y_pred, y_test)
print(score)
print(api.show_models())
.. rst-class:: sphx-glr-timing

**Total running time of the script:** ( 9 minutes 13.196 seconds)
**Total running time of the script:** ( 9 minutes 9.069 seconds)


.. _sphx_glr_download_examples_example_tabular_classification.py:
@@ -36,7 +36,7 @@ with AutoPyTorch

.. code-block:: none
<smac.runhistory.runhistory.RunHistory object at 0x7f3f94e1f760> [TrajEntry(train_perf=2147483648, incumbent_id=1, incumbent=Configuration:
<smac.runhistory.runhistory.RunHistory object at 0x7f66821e6b50> [TrajEntry(train_perf=2147483648, incumbent_id=1, incumbent=Configuration:
data_loader:batch_size, Value: 32
encoder:__choice__, Value: 'NoEncoder'
imputer:numerical_strategy, Value: 'mean'
@@ -64,7 +64,7 @@ with AutoPyTorch
optimizer:__choice__, Value: 'AdamOptimizer'
scaler:__choice__, Value: 'StandardScaler'
trainer:__choice__, Value: 'StandardTrainer'
, ta_runs=0, ta_time_used=0.0, wallclock_time=0.0017769336700439453, budget=0), TrajEntry(train_perf=0.00043087196655655635, incumbent_id=1, incumbent=Configuration:
, ta_runs=0, ta_time_used=0.0, wallclock_time=0.0017652511596679688, budget=0), TrajEntry(train_perf=0.005070456150329972, incumbent_id=1, incumbent=Configuration:
data_loader:batch_size, Value: 32
encoder:__choice__, Value: 'NoEncoder'
imputer:numerical_strategy, Value: 'mean'
@@ -92,8 +92,8 @@ with AutoPyTorch
optimizer:__choice__, Value: 'AdamOptimizer'
scaler:__choice__, Value: 'StandardScaler'
trainer:__choice__, Value: 'StandardTrainer'
, ta_runs=1, ta_time_used=7.4215779304504395, wallclock_time=10.76639461517334, budget=5.555555555555555)]
{'r2': 0.9998200109852567}
, ta_runs=1, ta_time_used=7.408640384674072, wallclock_time=10.797286748886108, budget=5.555555555555555)]
{'r2': 0.9996077661516851}
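The regression example's source is not part of this diff (only its recorded output changed), but by analogy with the classification script above it presumably builds a `TabularRegressionTask` and optimizes `r2`. A minimal sketch under that assumption follows; the class name, module path, dataset, and time limits are guesses for illustration, not taken from this commit.

.. code-block:: python

    # Assumption: autoPyTorch exposes a TabularRegressionTask mirroring the
    # TabularClassificationTask used above; dataset and limits are illustrative.
    import sklearn.datasets
    import sklearn.model_selection

    from autoPyTorch.api.tabular_regression import TabularRegressionTask

    X, y = sklearn.datasets.fetch_openml(name='boston', return_X_y=True, as_frame=True)
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
        X, y, random_state=1)

    api = TabularRegressionTask()
    api.search(X_train=X_train, y_train=y_train,
               X_test=X_test.copy(), y_test=y_test.copy(),
               optimize_metric='r2',
               total_walltime_limit=500, func_eval_time_limit=50)

    y_pred = api.predict(X_test)
    print(api.score(y_pred, y_test))   # a dict such as {'r2': ...}, as in the output above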
@@ -216,7 +216,7 @@ with AutoPyTorch
.. rst-class:: sphx-glr-timing

**Total running time of the script:** ( 8 minutes 26.163 seconds)
**Total running time of the script:** ( 8 minutes 28.738 seconds)


.. _sphx_glr_download_examples_example_tabular_regression.py:
@@ -5,12 +5,12 @@

Computation times
=================
**17:45.156** total execution time for **examples** files:
**17:46.141** total execution time for **examples** files:

+----------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_examples_example_tabular_classification.py` (``example_tabular_classification.py``) | 09:13.196 | 0.0 MB |
| :ref:`sphx_glr_examples_example_tabular_classification.py` (``example_tabular_classification.py``) | 09:09.069 | 0.0 MB |
+----------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_examples_example_tabular_regression.py` (``example_tabular_regression.py``) | 08:26.163 | 0.0 MB |
| :ref:`sphx_glr_examples_example_tabular_regression.py` (``example_tabular_regression.py``) | 08:28.738 | 0.0 MB |
+----------------------------------------------------------------------------------------------------+-----------+--------+
| :ref:`sphx_glr_examples_example_image_classification.py` (``example_image_classification.py``) | 00:05.798 | 0.0 MB |
| :ref:`sphx_glr_examples_example_image_classification.py` (``example_image_classification.py``) | 00:08.334 | 0.0 MB |
+----------------------------------------------------------------------------------------------------+-----------+--------+
14 changes: 8 additions & 6 deletions refactor_development/examples/example_image_classification.html
@@ -3939,21 +3939,23 @@
Pipeline Random Config:
________________________________________
Configuration:
image_augmenter:GaussianBlur:use_augmenter, Value: False
image_augmenter:GaussianBlur:sigma_min, Value: 0.8480354370981031
image_augmenter:GaussianBlur:sigma_offset, Value: 1.8662941407999276
image_augmenter:GaussianBlur:use_augmenter, Value: True
image_augmenter:GaussianNoise:use_augmenter, Value: False
image_augmenter:RandomAffine:use_augmenter, Value: False
image_augmenter:RandomCutout:p, Value: 0.5586693024569416
image_augmenter:RandomCutout:p, Value: 0.3656551312711466
image_augmenter:RandomCutout:use_augmenter, Value: True
image_augmenter:Resize:use_augmenter, Value: False
image_augmenter:ZeroPadAndCrop:percent, Value: 0.4581846771624755
normalizer:__choice__, Value: 'ImageNormalizer'
image_augmenter:ZeroPadAndCrop:percent, Value: 0.4929042351991033
normalizer:__choice__, Value: 'NoNormalizer'

Fitting the pipeline...
________________________________________
ImageClassificationPipeline
________________________________________
0-) normalizer:
ImageNormalizer
NoNormalizer

1-) preprocessing:
EarlyPreprocessing
@@ -4018,7 +4020,7 @@
<span class="nb">print</span><span class="p">(</span><span class="n">pipeline</span><span class="p">)</span>
</pre></div>
</div>
<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 0 minutes 5.798 seconds)</p>
<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 0 minutes 8.334 seconds)</p>
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-examples-example-image-classification-py">
<div class="binder-badge docutils container">
<a class="reference external image-reference" href="https://mybinder.org/v2/gh/automl/Auto-PyTorch/refactor_development?urlpath=lab/tree/notebooks/examples/example_image_classification.ipynb"><img alt="Launch binder" src="../_images/binder_badge_logo.svg" width="150px" /></a>
