[Fix] long running regression (#272)
* Fix long running regression

* Fix metric append

* Remove print

* Update autoPyTorch/pipeline/components/training/trainer/__init__.py

Co-authored-by: Ravin Kohli <13005107+ravinkohli@users.noreply.github.com>
franchuterivera and ravinkohli committed Jun 30, 2021
1 parent 8237f2c commit 54aab63
Showing 2 changed files with 7 additions and 7 deletions.
12 changes: 6 additions & 6 deletions autoPyTorch/pipeline/components/training/trainer/__init__.py
@@ -249,15 +249,15 @@ def _fit(self, X: Dict[str, Any], y: Any = None, **kwargs: Any) -> 'TrainerChoic
         )
 
         # Support additional user metrics
-        additional_metrics = X['additional_metrics'] if 'additional_metrics' in X else None
-        if 'optimize_metric' in X:
-            additional_metrics = additional_metrics.append(X['optimize_metric']) if additional_metrics is not None \
-                else [X['optimize_metric']]
+        metrics = get_metrics(dataset_properties=X['dataset_properties'])
+        if 'additional_metrics' in X:
+            metrics.extend(get_metrics(dataset_properties=X['dataset_properties'], names=X['additional_metrics']))
+        if 'optimize_metric' in X and X['optimize_metric'] not in [m.name for m in metrics]:
+            metrics.extend(get_metrics(dataset_properties=X['dataset_properties'], names=[X['optimize_metric']]))
         additional_losses = X['additional_losses'] if 'additional_losses' in X else None
         self.choice.prepare(
             model=X['network'],
-            metrics=get_metrics(dataset_properties=X['dataset_properties'],
-                                names=additional_metrics),
+            metrics=metrics,
             criterion=get_loss(X['dataset_properties'],
                                name=additional_losses),
             budget_tracker=budget_tracker,
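
The metric-append fix addresses a classic Python pitfall: list.append mutates the list in place and returns None, so the old code rebound additional_metrics to None whenever user metrics were already present. Below is a minimal sketch of the pitfall and of the accumulation pattern the new code follows; the plain strings are illustrative stand-ins for the metric objects returned by get_metrics, not the autoPyTorch API.

    # Pitfall in the removed code: append returns None, so the rebinding loses the list.
    additional_metrics = ['accuracy']
    additional_metrics = additional_metrics.append('balanced_accuracy')
    print(additional_metrics)  # None

    # Pattern of the fix: build the default list, extend it with user metrics,
    # and add the optimisation metric only if it is not already present.
    metrics = ['accuracy']                  # stands in for get_metrics(...)
    extra_metrics = ['balanced_accuracy']   # stands in for X['additional_metrics']
    optimize_metric = 'f1'                  # stands in for X['optimize_metric']

    metrics.extend(extra_metrics)
    if optimize_metric not in metrics:
        metrics.append(optimize_metric)
    print(metrics)  # ['accuracy', 'balanced_accuracy', 'f1']
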
2 changes: 1 addition & 1 deletion cicd/test_preselected_configs.py
@@ -153,7 +153,7 @@ def test_can_properly_fit_a_config(openml_task_id, configuration, scorer, lower_
     train_data, target_data = fit_dictionary['backend'].load_datamanager().train_tensors
     predictions = pipeline.predict(train_data[val_indices])
     score = scorer(fit_dictionary['y_train'][val_indices], predictions)
-    assert pytest.approx(score) >= lower_bound_score
+    assert score >= lower_bound_score
 
     # Check that we reverted to the best score
     run_summary = pipeline.named_steps['trainer'].run_summary
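
In the CI check, pytest.approx is documented for approximate equality comparisons, so wrapping one side of a >= threshold check is not its intended use; the change compares the raw score directly. A minimal sketch of the two usages, assuming only that pytest is installed:

    import pytest

    score = 0.912
    lower_bound_score = 0.90

    # pytest.approx is intended for approximate equality:
    assert score == pytest.approx(0.912, rel=1e-3)

    # A lower-bound check is an ordinary comparison on the raw value:
    assert score >= lower_bound_score
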
