diff --git a/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb b/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb
index 02902c19af..fcd91207cc 100644
--- a/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb
+++ b/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb
@@ -244,7 +244,7 @@
     "|**target_column_name**|The name of the label column.|\n",
     "|**primary_metric**|This is the metric that you want to optimize.<br>Forecasting supports the following primary metrics<br>spearman_correlation<br>normalized_root_mean_squared_error<br>r2_score<br>normalized_mean_absolute_error|\n",
     "|**training_data**|The training data to be used for this experiment. You can use a registered MLTable in the workspace using the format `:` OR you can use a local file or folder as a MLTable. For e.g `Input(mltable='my_mltable:1')` OR `Input(mltable=MLTable(local_path=\"./data\"))` The parameter 'training_data' must always be provided.|\n",
-    "|**compute**|The compute on which the AutoML job will run. In this example we are using a compute called 'cpu-cluster' present in the workspace. You can replace it with any other compute in the workspace.|\n",
+    "|**compute**|The compute on which the AutoML job will run. In this example we are using a compute called 'energy-cluster-v2' present in the workspace. You can replace it with any other compute in the workspace.|\n",
     "|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or, users could specify an integer value.|\n",
     "|**name**|The name of the Job/Run. This is an optional property. If not specified, a random name will be generated.\n",
     "|**experiment_name**|The name of the Experiment. An Experiment is like a folder with multiple runs in Azure ML Workspace that should be related to the same logical machine learning experiment. For example, if a user runs this notebook multiple times, there will be multiple runs associated with the same Experiment name.|\n",
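The table edited above describes the arguments that this notebook eventually passes to the AutoML forecasting job factory. A minimal sketch of how those pieces fit together with the azure-ai-ml v2 SDK follows; only the compute name `energy-cluster-v2` comes from this change, while the workspace placeholders, MLTable path, column names, horizon, and metric value are illustrative assumptions rather than values taken from the notebook.

```python
from azure.ai.ml import MLClient, Input, automl
from azure.ai.ml.constants import AssetTypes
from azure.identity import DefaultAzureCredential

# Connect to the workspace that contains the compute cluster (placeholders are illustrative).
ml_client = MLClient(
    DefaultAzureCredential(),
    subscription_id="<subscription_id>",
    resource_group_name="<resource_group>",
    workspace_name="<workspace_name>",
)

# Configure the forecasting task with the parameters described in the table above.
forecasting_job = automl.forecasting(
    compute="energy-cluster-v2",  # cluster name this diff points the notebook at
    experiment_name="dpv2-forecasting-experiment",
    training_data=Input(type=AssetTypes.MLTABLE, path="./data/training-mltable-folder"),
    target_column_name="demand",  # assumed label column
    primary_metric="NormalizedRootMeanSquaredError",  # normalized_root_mean_squared_error
    n_cross_validations="auto",
)

# Forecasting jobs also need forecast settings, and limits keep the sample run short.
forecasting_job.set_forecast_settings(time_column_name="timeStamp", forecast_horizon=48)
forecasting_job.set_limits(max_trials=5, enable_early_termination=True)

returned_job = ml_client.jobs.create_or_update(forecasting_job)
print(returned_job.studio_url)
```

Because `compute` is resolved by name at submission time, the renamed reference in the markdown only stays accurate if a cluster with that name actually exists in the workspace.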
diff --git a/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced.ipynb b/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced.ipynb
index 6b7814e7c3..021ec57a0a 100644
--- a/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced.ipynb
+++ b/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced.ipynb
@@ -172,7 +172,7 @@
     "- `training_data` - The data to be used for training. It should contain both training feature columns and a target column. Optionally, this data can be split for segregating a validation or test dataset. \n",
     "You can use a registered MLTable in the workspace using the format ':' OR you can use a local file or folder as a MLTable. For e.g Input(mltable='my_mltable:1') OR Input(mltable=MLTable(local_path=\"./data\"))\n",
     "The parameter 'training_data' must always be provided.\n",
-    "- `compute` - The compute on which the AutoML job will run. In this example we are using a compute called 'cpu-cluster' present in the workspace. You can replace it any other compute in the workspace. \n",
+    "- `compute` - The compute on which the AutoML job will run. In this example we are using a compute called 'adv-energy-cluster-v2' present in the workspace. You can replace it with any other compute in the workspace. \n",
     "- `name` - The name of the Job/Run. This is an optional property. If not specified, a random name will be generated.\n",
     "- `experiment_name` - The name of the Experiment. <br>An Experiment is like a folder with multiple runs in Azure ML Workspace that should be related to the same logical machine learning experiment.\n",
     "\n",
@@ -228,7 +228,7 @@
    "outputs": [],
    "source": [
     "# general job parameters\n",
-    "compute_name = \"cpu-cluster\"\n",
+    "compute_name = \"adv-energy-cluster-v2\"\n",
     "max_trials = 5\n",
     "exp_name = \"dpv2-forecasting-experiment\""
   ]
diff --git a/sdk/python/notebooks_config.ini b/sdk/python/notebooks_config.ini
index e22e4be9fb..7e2c191ecd 100644
--- a/sdk/python/notebooks_config.ini
+++ b/sdk/python/notebooks_config.ini
@@ -16,7 +16,7 @@ COMPUTE_NAMES = bike-share-v2
 
 [automl-forecasting-task-energy-demand-advanced]
 USE_FORECAST_REQUIREMENTS = 0
-COMPUTE_NAMES = cpu-cluster
+COMPUTE_NAMES = adv-energy-cluster-v2
 
 [automl-forecasting-task-energy-demand-advanced-mlflow]
 USE_FORECAST_REQUIREMENTS = 1
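The COMPUTE_NAMES entry in notebooks_config.ini records which compute each sample notebook expects, so the renamed clusters need to exist in the target workspace before either notebook runs. A minimal provisioning sketch with the azure-ai-ml v2 SDK is below; the VM size, scale settings, and workspace placeholders are assumptions, not part of this change.

```python
from azure.ai.ml import MLClient
from azure.ai.ml.entities import AmlCompute
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential

ml_client = MLClient(
    DefaultAzureCredential(),
    subscription_id="<subscription_id>",
    resource_group_name="<resource_group>",
    workspace_name="<workspace_name>",
)

# Create (or reuse) the clusters that the renamed COMPUTE_NAMES entries point at.
for cluster_name in ["energy-cluster-v2", "adv-energy-cluster-v2"]:
    try:
        ml_client.compute.get(cluster_name)
        print(f"Compute '{cluster_name}' already exists")
    except ResourceNotFoundError:
        cluster = AmlCompute(
            name=cluster_name,
            size="STANDARD_DS12_V2",  # assumed CPU SKU; any size available in the region works
            min_instances=0,
            max_instances=4,
            idle_time_before_scale_down=120,
        )
        ml_client.compute.begin_create_or_update(cluster).result()
        print(f"Created compute '{cluster_name}'")
```

The change swaps the shared `cpu-cluster` for per-notebook clusters, which avoids contention between samples at the cost of provisioning one cluster per notebook up front.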