diff --git a/README.md b/README.md
index 996ae0099fb..a4fc868f0a0 100644
--- a/README.md
+++ b/README.md
@@ -167,7 +167,7 @@ You run the task on a "job" function, and print the result output (in this case,
 For more information and examples, see the [**examples/mlrun_basics.ipynb**](examples/mlrun_basics.ipynb) notebook.
 ```python
 # Create a task and set its attributes
-task = new_task(handler=handler, name='demo', params={'p1': 5})
+task = NewTask(handler=handler, name='demo', params={'p1': 5})
 task.with_secrets('file', 'secrets.txt').set_label('type', 'demo')
 
 run = new_function(command='myfile.py', kind='job').run(task)
@@ -361,7 +361,7 @@ For example, the following code demonstrates how to use hyperparameters to run t
         "gamma": [0.0, 0.1, 0.2, 0.3],
     }
 
-    task = new_task(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
+    task = NewTask(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
     run = run_local(task)
 ```
 
@@ -377,7 +377,7 @@ mlrun run --name train_hyper -x p1="[3,7,5]" -x p2="[5,2,9]" --out-path '/User/m
 You can also use a parameters file if you want to control the parameter combinations or if the parameters are more complex.
 The following code from the example [**mlrun_basics.ipynb**](examples/mlrun_basics.ipynb) notebook demonstrates how to run a task that uses a CSV parameters file (**params.csv** in the current directory):
 ```python
-    task = new_task(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
+    task = NewTask(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
     run = run_local(task)
 ```
 
diff --git a/docs/job-submission-and-tracking.md b/docs/job-submission-and-tracking.md
index 78b914dbd8b..af27bf72f79 100644
--- a/docs/job-submission-and-tracking.md
+++ b/docs/job-submission-and-tracking.md
@@ -72,7 +72,7 @@ You run the task on a "job" function, and print the result output (in this case,
 For more information and examples, see the [**Examples section**](examples.html).
 ```python
 # Create a task and set its attributes
-task = new_task(handler=handler, name='demo', params={'p1': 5})
+task = NewTask(handler=handler, name='demo', params={'p1': 5})
 task.with_secrets('file', 'secrets.txt').set_label('type', 'demo')
 
 run = new_function(command='myfile.py', kind='job').run(task)
@@ -280,7 +280,7 @@ For example, the following code demonstrates how to use hyperparameters to run t
         "gamma": [0.0, 0.1, 0.2, 0.3],
     }
 
-    task = new_task(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
+    task = NewTask(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
     run = run_local(task)
 ```
 
@@ -296,7 +296,7 @@ mlrun run --name train_hyper -x p1="[3,7,5]" -x p2="[5,2,9]" --out-path '/User/m
 You can also use a parameters file if you want to control the parameter combinations or if the parameters are more complex.
 The following code from the [**Examples section**](examples.html) demonstrates how to run a task that uses a CSV parameters file (**params.csv** in the current directory):
 ```python
-    task = new_task(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
+    task = NewTask(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
     run = run_local(task)
 ```
 
diff --git a/examples/load-project.ipynb b/examples/load-project.ipynb
index d35f2e849ab..68e5a4c3727 100644
--- a/examples/load-project.ipynb
+++ b/examples/load-project.ipynb
@@ -518,8 +518,8 @@
     }
    ],
    "source": [
-    "from mlrun import run_local, new_task\n",
-    "run_local(new_task(handler='iris_generator'), proj.func('xgb'), workdir='./')"
+    "from mlrun import run_local, NewTask\n",
+    "run_local(NewTask(handler='iris_generator'), proj.func('xgb'), workdir='./')"
    ]
   },
   {
diff --git a/examples/mlrun_basics.ipynb b/examples/mlrun_basics.ipynb
index 10da85c4892..712f782f96a 100644
--- a/examples/mlrun_basics.ipynb
+++ b/examples/mlrun_basics.ipynb
@@ -97,7 +97,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from mlrun import run_local, RunTemplate, new_task, mlconf\n",
+    "from mlrun import run_local, RunTemplate, NewTask, mlconf\n",
     "from os import path\n",
     "mlconf.dbpath = mlconf.dbpath or './'"
    ]
@@ -177,7 +177,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "task = new_task(name='demo', params={'p1': 5}, artifact_path=artifact_path).with_secrets('file', 'secrets.txt').set_label('type', 'demo')"
+    "task = NewTask(name='demo', params={'p1': 5}, artifact_path=artifact_path).with_secrets('file', 'secrets.txt').set_label('type', 'demo')"
    ]
   },
   {
@@ -1723,7 +1723,7 @@
     }
    ],
    "source": [
-    "task = new_task(name='demo2', handler=handler, artifact_path=artifact_path).with_params(p1=7)\n",
+    "task = NewTask(name='demo2', handler=handler, artifact_path=artifact_path).with_params(p1=7)\n",
     "run = run_local(task)"
    ]
   },
@@ -2025,7 +2025,7 @@
     }
    ],
    "source": [
-    "task = new_task(name='demo2', handler=handler, artifact_path=artifact_path).with_param_file('params.csv', 'max.accuracy')\n",
+    "task = NewTask(name='demo2', handler=handler, artifact_path=artifact_path).with_param_file('params.csv', 'max.accuracy')\n",
     "run = run_local(task)"
    ]
   },
diff --git a/examples/mlrun_dask.ipynb b/examples/mlrun_dask.ipynb
index be4c8d08bb3..88bf98173f4 100644
--- a/examples/mlrun_dask.ipynb
+++ b/examples/mlrun_dask.ipynb
@@ -85,7 +85,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from mlrun import new_function, mlconf, code_to_function, mount_v3io, new_task\n",
+    "from mlrun import new_function, mlconf, code_to_function, mount_v3io, NewTask\n",
     "#mlconf.dbpath = 'http://mlrun-api:8080'"
    ]
   },
@@ -562,7 +562,7 @@
     "@dsl.pipeline(name=\"dask_pipeline\")\n",
     "def dask_pipe(x=1,y=10):\n",
     "    # use_db option will use a function (DB) pointer instead of adding the function spec to the YAML\n",
-    "    myrun = dsf.as_step(new_task(handler=hndlr, name=\"dask_pipeline\", params={'x': x, 'y': y}), use_db=True)\n",
+    "    myrun = dsf.as_step(NewTask(handler=hndlr, name=\"dask_pipeline\", params={'x': x, 'y': y}), use_db=True)\n",
     "    \n",
     "    # if the step (dask client) need v3io access u should add: .apply(mount_v3io())\n",
     "    \n",
diff --git a/examples/mlrun_export_import.ipynb b/examples/mlrun_export_import.ipynb
index 32279a7ba00..b2c31a7e936 100644
--- a/examples/mlrun_export_import.ipynb
+++ b/examples/mlrun_export_import.ipynb
@@ -234,7 +234,7 @@
     "\n",
     "# create and run the task\n",
     "images_path = path.abspath('images')\n",
-    "open_archive_task = mlrun.new_task('download',\n",
+    "open_archive_task = mlrun.NewTask('download',\n",
     "    params={'target_dir': images_path},\n",
     "    inputs={'archive_url': 'http://iguazio-sample-data.s3.amazonaws.com/catsndogs.zip'})"
    ]
diff --git a/examples/mlrun_jobs.ipynb b/examples/mlrun_jobs.ipynb
index 1a834d62f24..ae24edb03fd 100644
--- a/examples/mlrun_jobs.ipynb
+++ b/examples/mlrun_jobs.ipynb
@@ -261,7 +261,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from mlrun import run_local, code_to_function, mlconf, new_task\n",
+    "from mlrun import run_local, code_to_function, mlconf, NewTask\n",
     "from mlrun.platforms.other import auto_mount\n",
     "mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'"
    ]
@@ -539,7 +539,7 @@
     }
    ],
    "source": [
-    "train_run = run_local(new_task(handler=training, params={'p1': 5}, artifact_path=out))"
+    "train_run = run_local(NewTask(handler=training, params={'p1': 5}, artifact_path=out))"
    ]
   },
   {
@@ -813,7 +813,7 @@
    "source": [
     "model = train_run.outputs['mymodel']\n",
     "\n",
-    "validation_run = run_local(new_task(handler=validation, inputs={'model': model}, artifact_path=out))"
+    "validation_run = run_local(NewTask(handler=validation, inputs={'model': model}, artifact_path=out))"
    ]
   },
   {
@@ -1057,7 +1057,7 @@
    "outputs": [],
    "source": [
     "# create the base task (common to both steps), and set the output path and experiment label\n",
-    "base_task = new_task(artifact_path=out).set_label('stage', 'dev')"
+    "base_task = NewTask(artifact_path=out).set_label('stage', 'dev')"
    ]
   },
   {
@@ -1296,7 +1296,7 @@
    ],
    "source": [
     "# run our training task, with hyper params, and select the one with max accuracy\n",
-    "train_task = new_task(name='my-training', handler='training', params={'p1': 9}, base=base_task)\n",
+    "train_task = NewTask(name='my-training', handler='training', params={'p1': 9}, base=base_task)\n",
     "train_run = trainer.run(train_task)"
    ]
   },
diff --git a/examples/mlrun_sparkk8s.ipynb b/examples/mlrun_sparkk8s.ipynb
index 283d8fe6e78..84e1c6ea0ea 100644
--- a/examples/mlrun_sparkk8s.ipynb
+++ b/examples/mlrun_sparkk8s.ipynb
@@ -29,7 +29,7 @@
    "source": [
     "import os\n",
     "from os.path import isfile, join\n",
-    "from mlrun import new_function, new_task, mlconf\n",
+    "from mlrun import new_function, NewTask, mlconf\n",
     "\n",
     "#Set the mlrun database/api\n",
     "mlconf.dbpath = 'http://mlrun-api:8080'\n",
@@ -83,7 +83,7 @@
     "    'query': QUERY,\n",
     "    'write_options': WRITE_OPTIONS}\n",
     "\n",
-    "SPARK_TASK = new_task(params=PARAMS)"
+    "SPARK_TASK = NewTask(params=PARAMS)"
    ]
   },
   {
diff --git a/mlrun/model.py b/mlrun/model.py
index 4506007a8ab..7fda89ad67b 100644
--- a/mlrun/model.py
+++ b/mlrun/model.py
@@ -463,7 +463,7 @@ def NewTask(
     """
     warnings.warn(
         "NewTask will be deprecated in 0.7.0, and will be removed in 0.9.0, use new_task instead",
-        # TODO: change to FutureWarning in 0.7.0
+        # TODO: In 0.7.0, change to FutureWarning and replace NewTask with new_task in examples & demos
         PendingDeprecationWarning,
     )
     return new_task(
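
Note: the `mlrun/model.py` hunk above is what lets the examples call `NewTask` without breaking — `NewTask` is kept as a thin alias that emits a `PendingDeprecationWarning` and delegates to `new_task`. Below is a minimal, illustrative sketch of that alias pattern only; the stand-in `new_task` signature and the returned dict are simplified assumptions and do not mirror the real MLRun API.

```python
import warnings


def new_task(name=None, handler=None, params=None, **kwargs):
    # Simplified stand-in for mlrun.new_task: bundle the arguments into a task spec.
    return {"name": name, "handler": handler, "params": params or {}, **kwargs}


def NewTask(*args, **kwargs):
    # Deprecated spelling kept for backward compatibility; it only warns and delegates.
    warnings.warn(
        "NewTask will be deprecated in 0.7.0, and will be removed in 0.9.0, use new_task instead",
        PendingDeprecationWarning,
    )
    return new_task(*args, **kwargs)


# Both spellings build the same task; the alias only adds a warning.
assert NewTask(name="demo", params={"p1": 5}) == new_task(name="demo", params={"p1": 5})
```

Because `PendingDeprecationWarning` is ignored by default, notebooks that call `NewTask` keep running without warning noise; the updated TODO marks the later switch to `FutureWarning` (which is shown by default) and the rename to `new_task` across the examples and demos.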