new_task -> NewTask in examples (#456)
Hedingber committed Sep 28, 2020
1 parent 9656be4 commit bb19da3
Showing 9 changed files with 23 additions and 23 deletions.
6 changes: 3 additions & 3 deletions README.md
@@ -167,7 +167,7 @@ You run the task on a "job" function, and print the result output (in this case,
For more information and examples, see the [**examples/mlrun_basics.ipynb**](examples/mlrun_basics.ipynb) notebook.
```python
# Create a task and set its attributes
-task = new_task(handler=handler, name='demo', params={'p1': 5})
+task = NewTask(handler=handler, name='demo', params={'p1': 5})
task.with_secrets('file', 'secrets.txt').set_label('type', 'demo')

run = new_function(command='myfile.py', kind='job').run(task)
@@ -361,7 +361,7 @@ For example, the following code demonstrates how to use hyperparameters to run t
"gamma": [0.0, 0.1, 0.2, 0.3],
}

-task = new_task(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
+task = NewTask(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
run = run_local(task)
```

@@ -377,7 +377,7 @@ mlrun run --name train_hyper -x p1="[3,7,5]" -x p2="[5,2,9]" --out-path '/User/m
You can also use a parameters file if you want to control the parameter combinations or if the parameters are more complex.
The following code from the example [**mlrun_basics.ipynb**](examples/mlrun_basics.ipynb) notebook demonstrates how to run a task that uses a CSV parameters file (**params.csv** in the current directory):
```python
-task = new_task(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
+task = NewTask(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
run = run_local(task)
```

6 changes: 3 additions & 3 deletions docs/job-submission-and-tracking.md
@@ -72,7 +72,7 @@ You run the task on a "job" function, and print the result output (in this case,
For more information and examples, see the [**Examples section**](examples.html).
```python
# Create a task and set its attributes
-task = new_task(handler=handler, name='demo', params={'p1': 5})
+task = NewTask(handler=handler, name='demo', params={'p1': 5})
task.with_secrets('file', 'secrets.txt').set_label('type', 'demo')

run = new_function(command='myfile.py', kind='job').run(task)
@@ -280,7 +280,7 @@ For example, the following code demonstrates how to use hyperparameters to run t
"gamma": [0.0, 0.1, 0.2, 0.3],
}

-task = new_task(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
+task = NewTask(handler=xgb_train, out_path='/User/mlrun/data').with_hyper_params(parameters, 'max.accuracy')
run = run_local(task)
```

@@ -296,7 +296,7 @@ mlrun run --name train_hyper -x p1="[3,7,5]" -x p2="[5,2,9]" --out-path '/User/m
You can also use a parameters file if you want to control the parameter combinations or if the parameters are more complex.
The following code from the [**Examples section**](examples.html) demonstrates how to run a task that uses a CSV parameters file (**params.csv** in the current directory):
```python
-task = new_task(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
+task = NewTask(handler=xgb_train).with_param_file('params.csv', 'max.accuracy')
run = run_local(task)
```

4 changes: 2 additions & 2 deletions examples/load-project.ipynb
@@ -518,8 +518,8 @@
}
],
"source": [
"from mlrun import run_local, new_task\n",
"run_local(new_task(handler='iris_generator'), proj.func('xgb'), workdir='./')"
"from mlrun import run_local, NewTask\n",
"run_local(NewTask(handler='iris_generator'), proj.func('xgb'), workdir='./')"
]
},
{
8 changes: 4 additions & 4 deletions examples/mlrun_basics.ipynb
@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
"from mlrun import run_local, RunTemplate, new_task, mlconf\n",
"from mlrun import run_local, RunTemplate, NewTask, mlconf\n",
"from os import path\n",
"mlconf.dbpath = mlconf.dbpath or './'"
]
@@ -177,7 +177,7 @@
"metadata": {},
"outputs": [],
"source": [
"task = new_task(name='demo', params={'p1': 5}, artifact_path=artifact_path).with_secrets('file', 'secrets.txt').set_label('type', 'demo')"
"task = NewTask(name='demo', params={'p1': 5}, artifact_path=artifact_path).with_secrets('file', 'secrets.txt').set_label('type', 'demo')"
]
},
{
@@ -1723,7 +1723,7 @@
}
],
"source": [
"task = new_task(name='demo2', handler=handler, artifact_path=artifact_path).with_params(p1=7)\n",
"task = NewTask(name='demo2', handler=handler, artifact_path=artifact_path).with_params(p1=7)\n",
"run = run_local(task)"
]
},
@@ -2025,7 +2025,7 @@
}
],
"source": [
"task = new_task(name='demo2', handler=handler, artifact_path=artifact_path).with_param_file('params.csv', 'max.accuracy')\n",
"task = NewTask(name='demo2', handler=handler, artifact_path=artifact_path).with_param_file('params.csv', 'max.accuracy')\n",
"run = run_local(task)"
]
},
4 changes: 2 additions & 2 deletions examples/mlrun_dask.ipynb
@@ -85,7 +85,7 @@
"metadata": {},
"outputs": [],
"source": [
"from mlrun import new_function, mlconf, code_to_function, mount_v3io, new_task\n",
"from mlrun import new_function, mlconf, code_to_function, mount_v3io, NewTask\n",
"#mlconf.dbpath = 'http://mlrun-api:8080'"
]
},
@@ -562,7 +562,7 @@
"@dsl.pipeline(name=\"dask_pipeline\")\n",
"def dask_pipe(x=1,y=10):\n",
" # use_db option will use a function (DB) pointer instead of adding the function spec to the YAML\n",
" myrun = dsf.as_step(new_task(handler=hndlr, name=\"dask_pipeline\", params={'x': x, 'y': y}), use_db=True)\n",
" myrun = dsf.as_step(NewTask(handler=hndlr, name=\"dask_pipeline\", params={'x': x, 'y': y}), use_db=True)\n",
" \n",
" # if the step (dask client) need v3io access u should add: .apply(mount_v3io())\n",
" \n",
2 changes: 1 addition & 1 deletion examples/mlrun_export_import.ipynb
@@ -234,7 +234,7 @@
"\n",
"# create and run the task\n",
"images_path = path.abspath('images')\n",
"open_archive_task = mlrun.new_task('download',\n",
"open_archive_task = mlrun.NewTask('download',\n",
" params={'target_dir': images_path},\n",
" inputs={'archive_url': 'http://iguazio-sample-data.s3.amazonaws.com/catsndogs.zip'})"
]
10 changes: 5 additions & 5 deletions examples/mlrun_jobs.ipynb
@@ -261,7 +261,7 @@
"metadata": {},
"outputs": [],
"source": [
"from mlrun import run_local, code_to_function, mlconf, new_task\n",
"from mlrun import run_local, code_to_function, mlconf, NewTask\n",
"from mlrun.platforms.other import auto_mount\n",
"mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'"
]
@@ -539,7 +539,7 @@
}
],
"source": [
"train_run = run_local(new_task(handler=training, params={'p1': 5}, artifact_path=out))"
"train_run = run_local(NewTask(handler=training, params={'p1': 5}, artifact_path=out))"
]
},
{
@@ -813,7 +813,7 @@
"source": [
"model = train_run.outputs['mymodel']\n",
"\n",
"validation_run = run_local(new_task(handler=validation, inputs={'model': model}, artifact_path=out))"
"validation_run = run_local(NewTask(handler=validation, inputs={'model': model}, artifact_path=out))"
]
},
{
@@ -1057,7 +1057,7 @@
"outputs": [],
"source": [
"# create the base task (common to both steps), and set the output path and experiment label\n",
"base_task = new_task(artifact_path=out).set_label('stage', 'dev')"
"base_task = NewTask(artifact_path=out).set_label('stage', 'dev')"
]
},
{
@@ -1296,7 +1296,7 @@
],
"source": [
"# run our training task, with hyper params, and select the one with max accuracy\n",
"train_task = new_task(name='my-training', handler='training', params={'p1': 9}, base=base_task)\n",
"train_task = NewTask(name='my-training', handler='training', params={'p1': 9}, base=base_task)\n",
"train_run = trainer.run(train_task)"
]
},
4 changes: 2 additions & 2 deletions examples/mlrun_sparkk8s.ipynb
@@ -29,7 +29,7 @@
"source": [
"import os\n",
"from os.path import isfile, join\n",
"from mlrun import new_function, new_task, mlconf\n",
"from mlrun import new_function, NewTask, mlconf\n",
"\n",
"#Set the mlrun database/api\n",
"mlconf.dbpath = 'http://mlrun-api:8080'\n",
@@ -83,7 +83,7 @@
" 'query': QUERY,\n",
" 'write_options': WRITE_OPTIONS}\n",
"\n",
"SPARK_TASK = new_task(params=PARAMS)"
"SPARK_TASK = NewTask(params=PARAMS)"
]
},
{
2 changes: 1 addition & 1 deletion mlrun/model.py
@@ -463,7 +463,7 @@ def NewTask(
"""
warnings.warn(
"NewTask will be deprecated in 0.7.0, and will be removed in 0.9.0, use new_task instead",
-# TODO: change to FutureWarning in 0.7.0
+# TODO: In 0.7.0 and replace NewTask to new_task in examples & demos
PendingDeprecationWarning,
)
return new_task(
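The mlrun/model.py hunk above only edits a TODO comment, but its context lines show the pattern the examples now rely on: NewTask is a thin alias that emits a PendingDeprecationWarning and delegates to new_task. Below is a minimal, self-contained sketch of that alias pattern, not the actual mlrun source; the real new_task builds a run-template object, and the dict return value here is just a stand-in to keep the sketch runnable.

```python
import warnings


def new_task(name=None, handler=None, params=None, **kwargs):
    # Stand-in for mlrun.new_task: the real function returns a task/RunTemplate
    # object, while this sketch returns a plain dict for illustration only.
    return {"name": name, "handler": handler, "params": params, **kwargs}


def NewTask(*args, **kwargs):
    # Deprecated alias: warn, then delegate to new_task, mirroring the hunk above.
    warnings.warn(
        "NewTask will be deprecated in 0.7.0, and will be removed in 0.9.0, "
        "use new_task instead",
        PendingDeprecationWarning,
    )
    return new_task(*args, **kwargs)


# Hypothetical usage: behaves like new_task today.
task = NewTask(name="demo", params={"p1": 5})
```

Because PendingDeprecationWarning is ignored by Python's default warning filters, examples that call NewTask keep running without noise until the alias is promoted to a stronger warning and, per the comment, eventually removed.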
