diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml new file mode 100644 index 00000000..6d72ef3f --- /dev/null +++ b/.azure-pipelines/azure-pipelines.yml @@ -0,0 +1,76 @@ +trigger: +- master + +pool: + vmImage: 'ubuntu-16.04' + +container: ninhu/r-sdk-build:latest + +variables: +- name: TEST_LOCATION + value: eastus2 +- name: TEST_RESOURCE_GROUP + value: static_resources_r_sdk +- name: TEST_SUBSCRIPTION_ID + value: 4faaaf21-663f-4391-96fd-47197c630979 +- name: TEST_WORKSPACE_NAME + value: r_sdk_workspace +- name: TEST_BUILD_NUMBER + value: $(Build.BuildNumber) + +steps: + - task: Bash@3 + inputs: + targetType: 'inline' + script: | + Rscript './.azure-pipelines/scripts/validate_copyright_header.R' 'R' + displayName: 'Validate copyright header' + + - task: Bash@3 + inputs: + targetType: 'inline' + script: | + pip install --user azureml-sdk + R -e 'dir.create(Sys.getenv("R_LIBS_USER"), recursive = TRUE); + .libPaths(Sys.getenv("R_LIBS_USER")); + # build and install R sdk + devtools::install_deps(upgrade = FALSE); + package_location <- devtools::build(); + install.packages(package_location, repos = NULL)' + displayName: 'Build and Install SDK' + + - task: Bash@3 + inputs: + targetType: 'inline' + script: | + Rscript './.azure-pipelines/scripts/check_code_style.R' 'R' + displayName: 'Check code style' + + - task: Bash@3 + inputs: + targetType: 'inline' + script: | + R -e 'library("rcmdcheck"); + check_results <- rcmdcheck(".", args = c("--no-manual", "--no-tests")); + stopifnot(length(check_results$errors) == 0); + stopifnot(length(check_results$warnings) == 0)' + displayName: 'Check package build' + + - task: AzureCLI@1 + inputs: + azureSubscription: 'Project Vienna Build Tests (4faaaf21-663f-4391-96fd-47197c630979)' + scriptLocation: inlineScript + inlineScript: | + python -c "from azureml._base_sdk_common.common import perform_interactive_login; perform_interactive_login(username='$servicePrincipalId', password='$servicePrincipalKey', service_principal=True, tenant='$tenantId')" + R -e '# needed to load all non exported packages for testing + devtools::load_all(); + options(testthat.output_file = "TEST-ALL.xml"); + testthat::test_dir("tests/testthat", reporter = "junit")' + addSpnToEnvironment: true + displayName: 'Run R SDK Tests' + + - task: PublishTestResults@2 + inputs: + testResultsFormat: 'JUnit' + testResultsFiles: '**/TEST-*.xml' + failTaskOnFailedTests: true diff --git a/.azure-pipelines/docker/Dockerfile b/.azure-pipelines/docker/Dockerfile new file mode 100644 index 00000000..69a2bacd --- /dev/null +++ b/.azure-pipelines/docker/Dockerfile @@ -0,0 +1,25 @@ +FROM ubuntu:16.04 + +USER root:root +ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 +ENV DEBIAN_FRONTEND noninteractive + +# Miniconda +ENV MINICONDA_VERSION 4.5.11 +ENV PATH /opt/miniconda/bin:$PATH +RUN apt-get update && apt-get install -y bzip2 wget git libxrender1 && \ + wget -qO /tmp/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \ + bash /tmp/miniconda.sh -bf -p /opt/miniconda && \ + conda clean -ay && \ + rm -rf /opt/miniconda/pkgs && \ + rm /tmp/miniconda.sh && \ + find / -type d -name __pycache__ | xargs rm -rf + +RUN conda install -c r -y r-essentials r-devtools r-testthat r-reticulate && conda clean -ay && \ + pip install azureml-defaults + +# Azure CLI +RUN curl -sL https://aka.ms/InstallAzureCLIDeb | bash + +# Install R packages from CRAN +RUN R -e 'install.packages(c("roxygen2", "pkgdown", "rcmdcheck", "fs", "lintr"), repos = 
"http://cran.us.r-project.org")' \ No newline at end of file diff --git a/.azure-pipelines/scripts/check_code_style.R b/.azure-pipelines/scripts/check_code_style.R new file mode 100644 index 00000000..c1069bc4 --- /dev/null +++ b/.azure-pipelines/scripts/check_code_style.R @@ -0,0 +1,32 @@ +#!/usr/bin/env +args <- commandArgs(trailingOnly = TRUE) +if (length(args) == 0) { + stop("Please provide the directory path", call.=FALSE) +} + +library("lintr") + +check_code_style <- function(directory) { + files <- list.files(directory) + for (filename in files) { + if (filename == "package.R"){ + next + } + + file <- file.path(".", "R", filename) + + style_issues <- lintr::lint(file, linters = with_defaults( + line_length_linter = line_length_linter(240L), + object_length_linter = object_length_linter(40L) + ) + ) + + if (length(style_issues) != 0) { + print(file) + print(style_issues) + stop("Code quality failed.") + } + } +} + +check_code_style(directory = args[1]) diff --git a/.azure-pipelines/scripts/validate_copyright_header.R b/.azure-pipelines/scripts/validate_copyright_header.R new file mode 100644 index 00000000..fdc4bb3d --- /dev/null +++ b/.azure-pipelines/scripts/validate_copyright_header.R @@ -0,0 +1,25 @@ +#!/usr/bin/env +args <- commandArgs(trailingOnly = TRUE) +if (length(args) == 0) { + stop("Please provide the directory path", call.=FALSE) +} + +validate_copyright_header <- function(directory) { + copyright_header <- c("# Copyright(c) Microsoft Corporation.", + "# Licensed under the MIT license.") + files <- list.files(directory) + for (filename in files) { + file <- file.path(".", "R", filename) + file_handle <- file(file, open="r") + lines <- readLines(file_handle) + + assertthat::assert_that(length(lines) >= length(copyright_header)) + + for (i in 1:length(copyright_header)) { + assertthat::assert_that(lines[[i]] == copyright_header[[i]]) + } + } +} + + +validate_copyright_header(directory = args[1]) diff --git a/.azure-pipelines/update-docs.yml b/.azure-pipelines/update-docs.yml new file mode 100644 index 00000000..d0ec5af4 --- /dev/null +++ b/.azure-pipelines/update-docs.yml @@ -0,0 +1,53 @@ +trigger: + batch: true + branches: + include: + - master + +# no PR builds +pr: none + +pool: + vmImage: 'ubuntu-16.04' + +container: ninhu/r-sdk-build:latest + +steps: + - task: Bash@3 + inputs: + targetType: 'inline' + script: | + git config --global user.email "$(GITHUB_USER_EMAIL)" + git config --global user.name "$(GITHUB_USER_NAME)" + branch_ref=$(Build.SourceBranch) + echo BranchRef=$branch_ref + branch_name="${branch_ref/"refs/heads/"}" + echo BranchName=$branch_name + echo GIT CHECKOUT + git checkout $branch_name + echo GIT STATUS + git status + echo UPDATE DOCS + R -e 'dir.create(Sys.getenv("R_LIBS_USER"), recursive = TRUE); + .libPaths(Sys.getenv("R_LIBS_USER")); + devtools::install_deps(upgrade = FALSE); + unlink("man", recursive=TRUE, force=TRUE); + roxygen2::roxygenise(); + unlink("docs", recursive=TRUE, force=TRUE); + pkgdown::build_site();' + retVal=$? + if [ $retVal -ne 0 ]; then + echo "Failed to generate documents!! Exiting..." 
+ exit $retVal + fi + echo GIT ADD + git add man/* docs/* DESCRIPTION NAMESPACE + echo GIT COMMIT + git commit -m "Update R SDK docs via Build $(Build.BuildNumber) [skip ci]" + echo GIT STATUS + git status + echo GIT PUSH + git push https://$(GITHUB_AUTH_TOKEN)@github.com/Azure/azureml-sdk-for-r.git + echo GIT STATUS + git status + displayName: 'Update Docs' diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..e67df166 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,23 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..bbcbbe7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..c72a5749 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). + +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..c282e9a1 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to +agree to a Contributor License Agreement (CLA) declaring that you have the right to, +and actually do, grant us the rights to use your contribution. For details, visit +https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need +to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the +instructions provided by the bot. You will only need to do this once across all repositories using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. \ No newline at end of file diff --git a/DESCRIPTION b/DESCRIPTION new file mode 100644 index 00000000..71b8c25b --- /dev/null +++ b/DESCRIPTION @@ -0,0 +1,30 @@ +Package: azureml +Type: Package +Title: R Interface to the AzureML SDK +Version: 0.4.0 +Authors@R: c( + person("Himanshu", "Chandola", email = "Himanshu.Chandola@microsoft.com", role = c("aut")), + person("Billy", "Hu", email = "ninhu@microsoft.com", role = c("aut", "cre")), + person("Heemanshu", "Suri", email = "Heemanshu.Suri@microsoft.com", role = c("aut")), + person("Diondra", "Peck", email = "Diondra.Peck@microsoft.com", role = c("aut")), + person("Microsoft", role = c("cph", "fnd")) + ) +URL: https://github.com/azure/azureml-sdk-for-r +BugReports: https://github.com/azure/azureml-sdk-for-r/issues +Description: Provides an R interface to the AzureML Service. +Encoding: UTF-8 +License: MIT + file LICENSE +RoxygenNote: 6.1.1 +Depends: + R (>= 3.5.0) +Imports: + reticulate (>= 1.12), + R6 (>= 2.4.0), + plyr (>= 1.8), + DT, + parsedate, + rstudioapi (>= 0.7) +Suggests: rmarkdown, + knitr, + testthat +VignetteBuilder: knitr diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..4b1ad51b --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/NAMESPACE b/NAMESPACE new file mode 100644 index 00000000..16730278 --- /dev/null +++ b/NAMESPACE @@ -0,0 +1,118 @@ +# Generated by roxygen2: do not edit by hand + +export(aci_webservice_deployment_config) +export(aks_webservice_deployment_config) +export(attach_aks_compute) +export(azureml) +export(bandit_policy) +export(bayesian_parameter_sampling) +export(cancel_run) +export(choice) +export(container_registry) +export(create_aks_compute) +export(create_aml_compute) +export(create_workspace) +export(delete_compute) +export(delete_local_webservice) +export(delete_model) +export(delete_secrets) +export(delete_webservice) +export(delete_workspace) +export(deploy_model) +export(detach_aks_compute) +export(download_file_from_run) +export(download_files_from_run) +export(download_from_datastore) +export(download_model) +export(estimator) +export(experiment) +export(generate_new_webservice_key) +export(get_aks_compute_credentials) +export(get_best_run_by_primary_metric) +export(get_child_run_hyperparameters) +export(get_child_run_metrics) +export(get_child_runs_sorted_by_primary_metric) +export(get_compute) +export(get_current_run) +export(get_datastore) +export(get_default_datastore) +export(get_default_keyvault) +export(get_environment) +export(get_model) +export(get_model_package_container_registry) +export(get_model_package_creation_logs) +export(get_run) +export(get_run_details) +export(get_run_details_with_logs) +export(get_run_file_names) +export(get_run_metrics) +export(get_runs_in_experiment) +export(get_secrets) +export(get_secrets_from_run) +export(get_webservice) +export(get_webservice_keys) +export(get_webservice_logs) +export(get_webservice_token) +export(get_workspace) +export(get_workspace_details) +export(grid_parameter_sampling) +export(hyperdrive_config) +export(inference_config) +export(install_azureml) +export(invoke_webservice) +export(list_nodes_in_aml_compute) +export(list_secrets) +export(list_supported_vm_sizes) +export(list_workspaces) +export(load_workspace_from_config) +export(local_webservice_deployment_config) +export(log_accuracy_table_to_run) +export(log_confusion_matrix_to_run) +export(log_image_to_run) +export(log_list_to_run) +export(log_metric_to_run) +export(log_predictions_to_run) +export(log_residuals_to_run) +export(log_row_to_run) +export(log_table_to_run) +export(lognormal) +export(loguniform) +export(median_stopping_policy) +export(normal) +export(package_model) +export(primary_metric_goal) +export(pull_model_package_image) +export(qlognormal) +export(qloguniform) +export(qnormal) +export(quniform) +export(r_environment) +export(randint) +export(random_parameter_sampling) +export(register_azure_blob_container_datastore) +export(register_azure_file_share_datastore) +export(register_environment) +export(register_model) +export(reload_local_webservice_assets) +export(save_model_package_files) +export(set_default_datastore) +export(set_secrets) +export(submit_experiment) +export(truncation_selection_policy) +export(uniform) +export(unregister_datastore) +export(update_aci_webservice) +export(update_aks_webservice) +export(update_aml_compute) +export(update_local_webservice) +export(upload_files_to_datastore) +export(upload_to_datastore) +export(view_run_details)
+export(wait_for_deployment) +export(wait_for_model_package_creation) +export(wait_for_provisioning_completion) +export(wait_for_run_completion) +export(write_workspace_config) +importFrom(reticulate,import) +importFrom(reticulate,py_str) +importFrom(reticulate,use_condaenv) diff --git a/R/compute.R b/R/compute.R new file mode 100644 index 00000000..4252e1c4 --- /dev/null +++ b/R/compute.R @@ -0,0 +1,373 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. + +#' Create an AmlCompute cluster +#' +#' @description +#' Provision Azure Machine Learning Compute (AmlCompute) as a compute target +#' for training. AmlCompute is a managed-compute infrastructure that allows the +#' user to easily create a single or multi-node compute. To create a persistent +#' AmlCompute resource that can be reused across jobs, make sure to specify the +#' `vm_size` and `max_nodes` parameters. The compute can then be shared with +#' other users in the workspace and is kept between jobs. If `min_nodes = 0`, +#' the compute autoscales down to zero nodes when it isn't used, and scales up +#' automatically when a job is submitted. +#' +#' AmlCompute has default limits, such as the number of cores that can be +#' allocated. For more information, see +#' [Manage and request quotas for Azure resources](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas). +#' @param workspace The `Workspace` object. +#' @param cluster_name A string of the name of the cluster. +#' @param vm_size A string of the size of agent VMs. More details can be found +#' [here](https://aka.ms/azureml-vm-details). +#' Note that not all sizes are available in all regions, as detailed in the +#' aforementioned link. Defaults to `'Standard_NC6'`. +#' @param vm_priority A string of either `'dedicated'` or `'lowpriority'` to +#' use either dedicated or low-priority VMs. Defaults to `'dedicated'`. +#' @param min_nodes An integer of the minimum number of nodes to use on the +#' cluster. If not specified, will default to `0`. +#' @param max_nodes An integer of the maximum number of nodes to use on the +#' cluster. +#' @param idle_seconds_before_scaledown An integer of the node idle time in +#' seconds before scaling down the cluster. Defaults to `120`. +#' @param admin_username A string of the name of the administrator user account +#' that can be used to SSH into nodes. +#' @param admin_user_password A string of the password of the administrator user +#' account. +#' @param admin_user_ssh_key A string of the SSH public key of the administrator +#' user account. +#' @param vnet_resourcegroup_name A string of the name of the resource group +#' where the virtual network is located. +#' @param vnet_name A string of the name of the virtual network. +#' @param subnet_name A string of the name of the subnet inside the vnet. +#' @param tags A named list of tags for the cluster, e.g. +#' `list("tag" = "value")`. +#' @param description A string of the description for the cluster. +#' @return The `AmlCompute` object. +#' @export +#' @section Details: +#' For more information on using an Azure Machine Learning Compute resource +#' in a virtual network, see +#' [Secure Azure ML experimentation and inference jobs within an Azure Virtual Network](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#use-a-machine-learning-compute-instance).
+#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' compute_target <- create_aml_compute(ws, +#'                                      cluster_name = 'mycluster', +#'                                      vm_size = 'STANDARD_D2_V2', +#'                                      max_nodes = 1) +#' wait_for_provisioning_completion(compute_target, show_output = TRUE) +#' ``` +#' @seealso +#' `wait_for_provisioning_completion()` +#' @md +create_aml_compute <- function(workspace, + cluster_name, + vm_size, + vm_priority = "dedicated", + min_nodes = 0, + max_nodes = NULL, + idle_seconds_before_scaledown = NULL, + admin_username = NULL, + admin_user_password = NULL, + admin_user_ssh_key = NULL, + vnet_resourcegroup_name = NULL, + vnet_name = NULL, + subnet_name = NULL, + tags = NULL, + description = NULL) { + compute_config <- azureml$core$compute$AmlCompute$provisioning_configuration( + vm_size = vm_size, + vm_priority = vm_priority, + min_nodes = min_nodes, + max_nodes = max_nodes, + idle_seconds_before_scaledown = idle_seconds_before_scaledown, + admin_username = admin_username, + admin_user_password = admin_user_password, + admin_user_ssh_key = admin_user_ssh_key, + vnet_resourcegroup_name = vnet_resourcegroup_name, + vnet_name = vnet_name, + subnet_name = subnet_name, + tags = tags, + description = description) + + azureml$core$compute$ComputeTarget$create(workspace, + cluster_name, + compute_config) +} + +#' Get an existing compute cluster +#' +#' @description +#' Returns an `AmlCompute` or `AksCompute` object for an existing compute +#' resource. If the compute target doesn't exist, the function will return +#' `NULL`. +#' @param workspace The `Workspace` object. +#' @param cluster_name A string of the name of the cluster. +#' @return The `AmlCompute` or `AksCompute` object. +#' @export +#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' compute_target <- get_compute(ws, cluster_name = 'mycluster') +#' ``` +#' @md +get_compute <- function(workspace, cluster_name) { + tryCatch({ + azureml$core$compute$ComputeTarget(workspace = workspace, + name = cluster_name) + }, + error = function(e) { + if (grepl("ComputeTargetException", e$message)) { + NULL + } else { + stop(e) + } + } + ) +} + +#' Wait for a cluster to finish provisioning +#' +#' @description +#' Wait for a cluster to finish provisioning. Typically invoked after a +#' `create_aml_compute()` or `create_aks_compute()` call. +#' @param cluster The `AmlCompute` or `AksCompute` object. +#' @param show_output If `TRUE`, more verbose output will be provided. +#' @export +#' @section Examples: +#' Wait for an AmlCompute cluster to finish provisioning. +#' ``` +#' ws <- load_workspace_from_config() +#' compute_target <- create_aml_compute(ws, +#'                                      cluster_name = 'mycluster', +#'                                      vm_size = 'STANDARD_D2_V2', +#'                                      max_nodes = 1) +#' wait_for_provisioning_completion(compute_target) +#' ``` +#' @seealso +#' `create_aml_compute()`, `create_aks_compute()` +#' @md +wait_for_provisioning_completion <- function(cluster, show_output = FALSE) { + cluster$wait_for_completion(show_output) +} + +#' Delete a cluster +#' +#' @description +#' Remove the compute object from its associated workspace and delete the +#' corresponding cloud-based resource. +#' @param cluster The `AmlCompute` or `AksCompute` object.
+#' @export +#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' compute_target <- get_compute(ws, cluster_name = 'mycluster') +#' delete_compute(compute_target) +#' ``` +#' @md +delete_compute <- function(cluster) { + cluster$delete() + invisible(NULL) +} + +#' Update scale settings for an AmlCompute cluster +#' +#' @description +#' Update the scale settings for an existing AmlCompute cluster. +#' @param cluster The `AmlCompute` cluster. +#' @param min_nodes An integer of the minimum number of nodes to use on +#' the cluster. +#' @param max_nodes An integer of the maximum number of nodes to use on +#' the cluster. +#' @param idle_seconds_before_scaledown An integer of the node idle time +#' in seconds before scaling down the cluster. +#' @export +#' @md +update_aml_compute <- function(cluster, min_nodes = NULL, max_nodes = NULL, + idle_seconds_before_scaledown = NULL) { + cluster$update(min_nodes = min_nodes, + max_nodes = max_nodes, + idle_seconds_before_scaledown = idle_seconds_before_scaledown) + invisible(NULL) +} + +#' Get the details (e.g. IP address, port, etc.) of all the compute nodes in +#' the compute target +#' +#' @param cluster The `AmlCompute` cluster object. +#' @return A data frame of the details of all the compute nodes in the cluster. +#' @export +#' @md +list_nodes_in_aml_compute <- function(cluster) { + nodes <- cluster$list_nodes() + plyr::ldply(nodes, data.frame) +} + +#' Create an AksCompute cluster +#' +#' @description +#' Provision an Azure Kubernetes Service instance (AksCompute) as a compute +#' target for web service deployment. AksCompute is recommended for high-scale +#' production deployments and provides fast response time and autoscaling of +#' the deployed service. Cluster autoscaling isn't supported through the Azure +#' ML R SDK. To change the nodes in the AksCompute cluster, use the UI for the +#' cluster in the Azure portal. Once created, the cluster can be reused for +#' multiple deployments. +#' @param workspace The `Workspace` object. +#' @param cluster_name A string of the name of the cluster. +#' @param agent_count An integer of the number of agents (VMs) to host +#' containers. Defaults to `3`. +#' @param vm_size A string of the size of agent VMs. More details can be found +#' [here](https://aka.ms/azureml-vm-details). +#' Note that not all sizes are available in all regions, as detailed in the +#' aforementioned link. Defaults to `'Standard_D3_v2'`. +#' @param ssl_cname A string of a CName to use if enabling SSL validation on +#' the cluster. Must provide all three - CName, cert file, and key file - to +#' enable SSL validation. +#' @param ssl_cert_pem_file A string of a file path to a file containing cert +#' information for SSL validation. Must provide all three - CName, cert file, +#' and key file - to enable SSL validation. +#' @param ssl_key_pem_file A string of a file path to a file containing key +#' information for SSL validation. Must provide all three - CName, cert file, +#' and key file - to enable SSL validation. +#' @param location A string of the location to provision the cluster in. If not +#' specified, defaults to the workspace location. Available regions for this +#' compute can be found here: +#' "https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=kubernetes-service". +#' @param vnet_resourcegroup_name A string of the name of the resource group +#' where the virtual network is located. +#' @param vnet_name A string of the name of the virtual network.
+#' @param subnet_name A string of the name of the subnet inside the vnet. +#' @param service_cidr A string of a CIDR notation IP range from which to assign +#' service cluster IPs. +#' @param dns_service_ip A string of the container's DNS server IP address. +#' @param docker_bridge_cidr A string of a CIDR notation IP for Docker bridge. +#' @return An `AksCompute` object. +#' @export +#' @section Details: +#' For more information on using an AksCompute resource within a virtual +#' network, see +#' [Secure Azure ML experimentation and inference jobs within an Azure Virtual Network](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#use-azure-kubernetes-service-aks). +#' @section Examples: +#' Create an AksCompute cluster using the default configuration (you can also +#' provide parameters to customize this). +#' ``` +#' ws <- load_workspace_from_config() +#' compute_target <- create_aks_compute(ws, cluster_name = 'mycluster') +#' wait_for_provisioning_completion(compute_target) +#' ``` +#' @md +create_aks_compute <- function(workspace, + cluster_name, + agent_count = NULL, + vm_size = NULL, + ssl_cname = NULL, + ssl_cert_pem_file = NULL, + ssl_key_pem_file = NULL, + location = NULL, + vnet_resourcegroup_name = NULL, + vnet_name = NULL, + subnet_name = NULL, + service_cidr = NULL, + dns_service_ip = NULL, + docker_bridge_cidr = NULL) { + compute_config <- azureml$core$compute$AksCompute$provisioning_configuration( + agent_count = agent_count, + vm_size = vm_size, + ssl_cname = ssl_cname, + ssl_cert_pem_file = ssl_cert_pem_file, + ssl_key_pem_file = ssl_key_pem_file, + location = location, + vnet_resourcegroup_name = vnet_resourcegroup_name, + vnet_name = vnet_name, + subnet_name = subnet_name, + service_cidr = service_cidr, + dns_service_ip = dns_service_ip, + docker_bridge_cidr = docker_bridge_cidr) + + azureml$core$compute$ComputeTarget$create(workspace, + cluster_name, + compute_config) +} + +#' Get the credentials for an AksCompute cluster +#' +#' @description +#' Retrieves the credentials for an AksCompute cluster. +#' @param cluster The `AksCompute` object. +#' @return A named list of the cluster credentials. +#' @export +#' @md +get_aks_compute_credentials <- function(cluster) { + cluster$get_credentials() +} + +#' Attach an existing AKS cluster to a workspace +#' +#' @description +#' If you already have an AKS cluster in your Azure subscription, and it is +#' version 1.12.##, you can attach it to your workspace to use for deployments. +#' The existing AKS cluster can be in a different Azure region than your +#' workspace. +#' +#' If you want to secure your AKS cluster using an Azure Virtual Network, you +#' must create the virtual network first. For more information, see +#' [Secure Azure ML experimentation and inference jobs within an Azure Virtual Network](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#aksvnet). +#' @param workspace The `Workspace` object to attach the AKS cluster to. +#' @param cluster_name A string of the name for the cluster. +#' @param resource_id A string of the resource ID for the AKS cluster being +#' attached. +#' @param resource_group A string of the resource group in which the AKS cluster +#' is located. +#' @return The `AksCompute` object.
+#' @export +#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' compute_target <- attach_aks_compute(ws, +#'                                      cluster_name = 'mycluster', +#'                                      resource_id = 'myresourceid', +#'                                      resource_group = 'myresourcegroup') +#' ``` +#' @md +attach_aks_compute <- function(workspace, + cluster_name, + resource_id = NULL, + resource_group = NULL) { + attach_config <- azureml$core$compute$AksCompute$attach_configuration( + resource_group = resource_group, resource_id = resource_id) + + azureml$core$compute$ComputeTarget$attach(workspace, + cluster_name, + attach_config) +} + +#' Detach an AksCompute cluster from its associated workspace +#' +#' @description +#' Detach the AksCompute cluster from its associated workspace. No +#' underlying cloud resource will be deleted; the association will +#' just be removed. +#' @param cluster The `AksCompute` object. +#' @export +#' @md +detach_aks_compute <- function(cluster) { + cluster$detach() + invisible(NULL) +} + +#' List the supported VM sizes in a region +#' +#' @param workspace The `Workspace` object. +#' @param location A string of the location of the cluster. If not specified, +#' will default to the workspace location. +#' @return A data frame of the supported VM sizes in a region, including +#' the VM name, VCPUs, and RAM. +#' @export +#' @md
list_supported_vm_sizes <- function(workspace, location = NULL) { + vm_sizes <- azureml$core$compute$AmlCompute$supported_vmsizes(workspace, + location) + plyr::ldply(vm_sizes, data.frame) +} diff --git a/R/datastore.R b/R/datastore.R new file mode 100644 index 00000000..7204207a --- /dev/null +++ b/R/datastore.R @@ -0,0 +1,266 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. + +#' Upload files to the Azure storage a datastore points to +#' +#' @description +#' Upload the data from the local file system to the Azure storage that the +#' datastore points to. +#' @param datastore The `AzureBlobDatastore` or `AzureFileDatastore` object. +#' @param files A list of strings of the absolute paths of the files to upload. +#' @param relative_root A string of the base path which is used to +#' determine the path of the files in the Azure storage. For example, if +#' we upload `/path/to/file.txt`, and we define the base path to be `/path`, +#' when `file.txt` is uploaded to the blob storage or file share, it will +#' have the path of `/to/file.txt`. If `target_path` is also given, then it +#' will be used as the prefix for the derived path from above. The base path +#' must be a common path of all of the files, otherwise an exception will be +#' thrown. +#' @param target_path A string of the location in the blob container or file +#' share to upload the data to. Defaults to `NULL`, in which case the data is +#' uploaded to the root. +#' @param overwrite If `TRUE`, overwrites any existing data at `target_path`. +#' @param show_progress If `TRUE`, show the progress of the upload in the console. +#' @return The `DataReference` object for the target path uploaded. +#' @export +#' @md +upload_files_to_datastore <- function(datastore, files, + relative_root = NULL, + target_path = NULL, + overwrite = FALSE, + show_progress = TRUE) { + data_ref <- datastore$upload_files(files, + relative_root, + target_path, + overwrite, + show_progress) + invisible(data_ref) +} + +#' Upload a local directory to the Azure storage a datastore points to +#' +#' @description +#' Upload a local directory to the Azure storage the datastore points to. +#' @param datastore The `AzureBlobDatastore` or `AzureFileDatastore` object.
+#' @param src_dir A string of the local directory to upload. +#' @param target_path A string of the location in the blob container or +#' file share to upload the data to. Defaults to `NULL`, in which case the data +#' is uploaded to the root. +#' @param overwrite If `TRUE`, overwrites any existing data at `target_path`. +#' @param show_progress If `TRUE`, show the progress of the upload in the console. +#' @return The `DataReference` object for the target path uploaded. +#' @export +#' @md +upload_to_datastore <- function(datastore, + src_dir, + target_path = NULL, + overwrite = FALSE, + show_progress = TRUE) { + data_ref <- datastore$upload(src_dir, target_path, overwrite, show_progress) + invisible(data_ref) +} + +#' Download data from a datastore to the local file system +#' +#' @description +#' Download data from the datastore to the local file system. +#' @param datastore The `AzureBlobDatastore` or `AzureFileDatastore` object. +#' @param target_path A string of the local directory to download the data to. +#' @param prefix A string of the path to the folder in the blob container +#' or file store to download. If `NULL`, will download everything in the blob +#' container or file share. +#' @param overwrite If `TRUE`, overwrites any existing data at `target_path`. +#' @param show_progress If `TRUE`, show the progress of the download in the console. +#' @return An integer of the number of files successfully downloaded. +#' @export +#' @md +download_from_datastore <- function(datastore, + target_path, + prefix = NULL, + overwrite = FALSE, + show_progress = TRUE) { + num_files <- datastore$download(target_path, + prefix = prefix, + overwrite = overwrite, + show_progress = show_progress) + invisible(num_files) +} + +#' Get an existing datastore +#' +#' @description +#' Get the corresponding datastore object for an existing +#' datastore by name from the given workspace. +#' @param workspace The `Workspace` object. +#' @param datastore_name A string of the name of the datastore. +#' @return The `AzureBlobDatastore` or `AzureFileDatastore` object. +#' @export +#' @md +get_datastore <- function(workspace, datastore_name) { + azureml$core$Datastore$get(workspace, datastore_name) +} + +#' Register an Azure blob container as a datastore +#' +#' @description +#' Register an Azure blob container as a datastore. You can choose to use +#' either the SAS token or the storage account key. +#' @param workspace The `Workspace` object. +#' @param datastore_name A string of the name of the datastore. The name +#' is case insensitive and can only contain alphanumeric characters and +#' underscores. +#' @param container_name A string of the name of the Azure blob container. +#' @param account_name A string of the storage account name. +#' @param sas_token A string of the account SAS token. +#' @param account_key A string of the storage account key. +#' @param protocol A string of the protocol to use to connect to the +#' blob container. If `NULL`, defaults to `'https'`. +#' @param endpoint A string of the endpoint of the blob container. +#' If `NULL`, defaults to `'core.windows.net'`. +#' @param overwrite If `TRUE`, overwrites an existing datastore. If +#' the datastore does not exist, it will create one. +#' @param create_if_not_exists If `TRUE`, creates the blob container +#' if it does not exist. +#' @param skip_validation If `TRUE`, skips validation of storage keys. +#' @param blob_cache_timeout An integer of the cache timeout in seconds +#' when this blob is mounted. If `NULL`, defaults to no timeout (i.e.
+#' blobs will be cached for the duration of the job when read). +#' @param grant_workspace_access If `TRUE`, grants workspace Managed Identities +#' (MSI) access to the user storage account. This should be set to `TRUE` if the +#' storage account is in a VNet. If `TRUE`, Azure ML will use the workspace MSI +#' token to grant access to the user storage account. It may take a while for +#' the granted access to take effect. +#' @param subscription_id A string of the subscription ID of the storage +#' account. +#' @param resource_group A string of the resource group of the storage account. +#' @return The `AzureBlobDatastore` object. +#' @export +#' @section Details: +#' In general we recommend Azure Blob storage over Azure File storage. Both +#' standard and premium storage are available for blobs. Although more +#' expensive, we suggest premium storage due to faster throughput speeds that +#' may improve the speed of your training runs, particularly if you train +#' against a large dataset. +#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' ds <- register_azure_blob_container_datastore( +#'     ws, +#'     datastore_name = 'mydatastore', +#'     container_name = 'myazureblobcontainername', +#'     account_name = 'mystorageaccountname', +#'     account_key = 'mystorageaccountkey') +#' ``` +#' @md +register_azure_blob_container_datastore <- function( + workspace, + datastore_name, + container_name, + account_name, + sas_token = NULL, + account_key = NULL, + protocol = NULL, + endpoint = NULL, + overwrite = FALSE, + create_if_not_exists = FALSE, + skip_validation = FALSE, + blob_cache_timeout = NULL, + grant_workspace_access = FALSE, + subscription_id = NULL, + resource_group = NULL) { + azureml$core$Datastore$register_azure_blob_container( + workspace = workspace, + datastore_name = datastore_name, + container_name = container_name, + account_name = account_name, + sas_token = sas_token, + account_key = account_key, + protocol = protocol, + endpoint = endpoint, + overwrite = overwrite, + create_if_not_exists = create_if_not_exists, + skip_validation = skip_validation, + blob_cache_timeout = blob_cache_timeout, + grant_workspace_access = grant_workspace_access, + subscription_id = subscription_id, + resource_group = resource_group) +} + +#' Register an Azure file share as a datastore +#' +#' @description +#' Register an Azure file share as a datastore. You can choose to use +#' either the SAS token or the storage account key. +#' @param workspace The `Workspace` object. +#' @param datastore_name A string of the name of the datastore. The name +#' is case insensitive and can only contain alphanumeric characters and +#' underscores. +#' @param file_share_name A string of the name of the Azure file share. +#' @param account_name A string of the storage account name. +#' @param sas_token A string of the account SAS token. +#' @param account_key A string of the storage account key. +#' @param protocol A string of the protocol to use to connect to the +#' file store. If `NULL`, defaults to `'https'`. +#' @param endpoint A string of the endpoint of the file store. +#' If `NULL`, defaults to `'core.windows.net'`. +#' @param overwrite If `TRUE`, overwrites an existing datastore. If +#' the datastore does not exist, it will create one. +#' @param create_if_not_exists If `TRUE`, creates the file share +#' if it does not exist. +#' @param skip_validation If `TRUE`, skips validation of storage keys. +#' @return The `AzureFileDatastore` object.
+#' @export +#' @section Details: +#' In general we recommend Azure Blob storage over Azure File storage. Both +#' standard and premium storage are available for blobs. Although more +#' expensive, we suggest premium storage due to faster throughput speeds that +#' may improve the speed of your training runs, particularly if you train +#' against a large dataset. +#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' ds <- register_azure_file_share_datastore( +#'     ws, +#'     datastore_name = 'mydatastore', +#'     file_share_name = 'myazurefilesharename', +#'     account_name = 'mystorageaccountname', +#'     account_key = 'mystorageaccountkey') +#' ``` +#' @md +register_azure_file_share_datastore <- function(workspace, + datastore_name, + file_share_name, + account_name, + sas_token = NULL, + account_key = NULL, + protocol = NULL, + endpoint = NULL, + overwrite = FALSE, + create_if_not_exists = FALSE, + skip_validation = FALSE) { + azureml$core$Datastore$register_azure_file_share( + workspace = workspace, + datastore_name = datastore_name, + file_share_name = file_share_name, + account_name = account_name, + sas_token = sas_token, + account_key = account_key, + protocol = protocol, + endpoint = endpoint, + overwrite = overwrite, + create_if_not_exists = create_if_not_exists, + skip_validation = skip_validation) +} + +#' Unregister a datastore from its associated workspace +#' +#' @description +#' Unregister the datastore from its associated workspace. The +#' underlying Azure storage will not be deleted. +#' @param datastore The `AzureBlobDatastore` or `AzureFileDatastore` object. +#' @export +#' @md +unregister_datastore <- function(datastore) { + datastore$unregister() + invisible(NULL) +} diff --git a/R/environment.R b/R/environment.R new file mode 100644 index 00000000..75f7d36a --- /dev/null +++ b/R/environment.R @@ -0,0 +1,238 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. + +#' Create an environment +#' +#' @description +#' Configure the R environment to be used for training or web service +#' deployments. When you submit a run or deploy a model, Azure ML builds a +#' Docker image and creates a conda environment with your specifications from +#' your `Environment` object within that Docker container. +#' +#' If the `custom_docker_image` parameter +#' is not set, Azure ML automatically uses a default base image (CPU or GPU +#' depending on the `use_gpu` flag) and installs any R packages specified in the +#' `cran_packages`, `github_packages`, or `custom_url_packages` parameters. +#' TODO: link to the Dockerfiles of the default base images. +#' @param name A string of the name of the environment. +#' @param version A string of the version of the environment. +#' @param environment_variables A named list of environment variable names +#' and values. These environment variables are set on the process where the user +#' script is being executed. +#' @param cran_packages A character vector of CRAN packages to be installed. +#' @param github_packages A character vector of GitHub packages to be installed. +#' @param custom_url_packages A character vector of packages to be installed +#' from local directory or custom URL. +#' @param custom_docker_image A string of the name of the Docker image from +#' which the image to use for training or deployment will be built. If not set, +#' a default CPU-based image will be used as the base image.
To use an image +#' from a private Docker repository, you will also have to specify the +#' `image_registry_details` parameter. +#' @param image_registry_details A `ContainerRegistry` object of the details of +#' the Docker image registry for the custom Docker image. +#' @param use_gpu Indicates whether the environment should support GPUs. +#' If `TRUE`, a GPU-based default Docker image will be used in the environment. +#' If `FALSE`, a CPU-based image will be used. Default Docker images (CPU or +#' GPU) will only be used if the `custom_docker_image` parameter is not set. +#' @param shm_size A string for the size of the Docker container's shared +#' memory block. For more information, see +#' [Docker run reference](https://docs.docker.com/engine/reference/run/). +#' If not set, a default value of `'2g'` is used. +#' @return The `Environment` object. +#' @export +#' @section Details: +#' Once built, the Docker image appears in the Azure Container Registry +#' associated with your workspace, by default. The repository name has the form +#' `azureml/azureml_<uuid>`. The unique identifier (*uuid*) part corresponds to +#' a hash computed from the environment configuration. This allows the service +#' to determine whether an image corresponding to the given environment already +#' exists for reuse. +#' +#' If you make changes to an existing environment, such as adding an R package, +#' a new version of the environment is created when you either submit a run, +#' deploy a model, or manually register the environment. The versioning allows +#' you to view changes to the environment over time. +#' @section Examples: +#' The following example defines an environment that will use the default +#' base CPU image and install the additional e1071 package from CRAN. +#' ``` +#' r_env <- r_environment(name = 'myr_env', +#'                        version = '1', +#'                        cran_packages = c('e1071')) +#' ``` +#' @seealso +#' `estimator()`, `inference_config()` +#' @md +r_environment <- function(name, version = NULL, + environment_variables = NULL, + cran_packages = NULL, + github_packages = NULL, + custom_url_packages = NULL, + custom_docker_image = NULL, + image_registry_details = NULL, + use_gpu = FALSE, + shm_size = NULL) { + env <- azureml$core$Environment(name) + env$version <- version + env$python$user_managed_dependencies <- TRUE + env$environment_variables <- environment_variables + env$docker$enabled <- TRUE + env$docker$base_image <- custom_docker_image + + if (!is.null(image_registry_details)) { + env$docker$base_image_registry <- image_registry_details + } + if (!is.null(shm_size)) { + env$docker$shm_size <- shm_size + } + + if (is.null(custom_docker_image)) { + processor <- "cpu" + if (use_gpu) { + processor <- "gpu" + } + env$docker$base_image <- paste("r-base", + processor, + sep = ":") + env$docker$base_image_registry$address <- + "viennaprivate.azurecr.io" + } + + # If extra packages are specified, generate a Dockerfile + if (!is.null(cran_packages) || + !is.null(github_packages) || + !is.null(custom_url_packages)) { + base_image_with_address <- NULL + registry_address <- env$docker$base_image_registry$address + if (!is.null(env$docker$base_image_registry$address)) { + base_image_with_address <- paste(registry_address, + env$docker$base_image, + sep = "/") + } + env$docker$base_dockerfile <- generate_docker_file(base_image_with_address, + cran_packages, + github_packages, + custom_url_packages) + env$docker$base_image <- NULL + } + + invisible(env) +} + +#' Register an environment in the workspace +#' +#' @description +#' The
environment is automatically registered with your workspace when you +#' submit an experiment or deploy a web service. You can also manually register +#' the environment with `register_environment()`. This operation makes the +#' environment into an entity that is tracked and versioned in the cloud, and +#' can be shared between workspace users. +#' +#' When used for the first time in training or deployment, the environment is +#' registered with the workspace, built, and deployed on the compute target. +#' The environments are cached by the service. Reusing a cached environment +#' takes much less time than using a new service or one that has been updated. +#' @param workspace The `Workspace` object. +#' @param environment The `Environment` object. +#' @return The `Environment` object. +#' @export +#' @md +register_environment <- function(environment, workspace) { + env <- environment$register(workspace) + invisible(env) +} + +#' Get an existing environment +#' +#' @description +#' Returns an `Environment` object for an existing environment in +#' the workspace. +#' @param workspace The `Workspace` object. +#' @param name A string of the name of the environment. +#' @param version A string of the version of the environment. +#' @return The `Environment` object. +#' @section Examples: +#' ``` +#' ws <- load_workspace_from_config() +#' env <- get_environment(ws, name = 'myenv', version = '1') +#' ``` +#' @export +#' @md +get_environment <- function(workspace, name, version = NULL) { + azureml$core$Environment$get(workspace, name, version) +} + +#' Specify Azure Container Registry details +#' +#' @description +#' Returns a `ContainerRegistry` object with the details for an +#' Azure Container Registry (ACR). This is needed when a custom +#' Docker image used for training or deployment is located in +#' a private image registry. Provide a `ContainerRegistry` object +#' to the `image_registry_details` parameter of either `r_environment()` +#' or `estimator()`. +#' @param address A string of the DNS name or IP address of the +#' Azure Container Registry (ACR). +#' @param username A string of the username for ACR. +#' @param password A string of the password for ACR. +#' @return The `ContainerRegistry` object. +#' @export +#' @seealso +#' `r_environment()`, `estimator()` +#' @md +container_registry <- function(address = NULL, + username = NULL, + password = NULL) { + container_registry <- azureml$core$ContainerRegistry() + container_registry$address <- address + container_registry$username <- username + container_registry$password <- password + + invisible(container_registry) +} + +#' Generate a Dockerfile string to build the image for training. +#' @param custom_docker_image The name of the Docker image from which the image +#' to use for training will be built. If not set, a default CPU-based image will +#' be used as the base image. +#' @param cran_packages A character vector of CRAN packages to be installed. +#' @param github_packages A character vector of GitHub packages to be installed. +#' @param custom_url_packages A character vector of packages to be installed from +#' a local directory or custom URL.
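+#' @return A string of the generated Dockerfile contents, returned invisibly.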
+generate_docker_file <- function(custom_docker_image = NULL, + cran_packages = NULL, + github_packages = NULL, + custom_url_packages = NULL) { + base_dockerfile <- NULL + base_dockerfile <- paste0(base_dockerfile, sprintf("FROM %s\n", + custom_docker_image)) + + if (!is.null(cran_packages)) { + for (package in cran_packages) { + base_dockerfile <- paste0( + base_dockerfile, + sprintf("RUN R -e \"install.packages(\'%s\', ", package), + "repos = \'http://cran.us.r-project.org\')\"\n") + } + } + + if (!is.null(github_packages)) { + for (package in github_packages) { + base_dockerfile <- paste0( + base_dockerfile, + sprintf("RUN R -e \"devtools::install_github(\'%s\')\"\n", package)) + } + } + + if (!is.null(custom_url_packages)) { + for (package in custom_url_packages) { + base_dockerfile <- paste0( + base_dockerfile, + sprintf("RUN R -e \"install.packages(\'%s\', repos = NULL)\"\n", + package)) + } + } + + invisible(base_dockerfile) +} diff --git a/R/estimator.R b/R/estimator.R new file mode 100644 index 00000000..9f0fa52f --- /dev/null +++ b/R/estimator.R @@ -0,0 +1,110 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. + +#' Create an estimator +#' +#' @description +#' An Estimator wraps run configuration information for specifying details +#' of executing an R script. Running an Estimator experiment +#' (using `submit_experiment()`) will return a `ScriptRun` object and +#' execute your training script on the specified compute target. +#' @param source_directory A string of the local directory containing +#' experiment configuration and code files needed for the training job. +#' @param compute_target The `AmlCompute` object for the compute target +#' where training will happen. +#' @param vm_size A string of the VM size of the compute target that will be +#' created for the training job. The available VM sizes +#' are listed [here](https://docs.microsoft.com/azure/cloud-services/cloud-services-sizes-specs). +#' Provide this parameter if you want to create AmlCompute as the compute target +#' at run time, instead of providing an existing cluster to the `compute_target` +#' parameter. If `vm_size` is specified, a single-node cluster is automatically +#' created for your run and is deleted automatically once the run completes. +#' @param vm_priority A string of either `'dedicated'` or `'lowpriority'` to +#' specify the VM priority of the compute target that will be created for the +#' training job. Defaults to `'dedicated'`. This takes effect only when the +#' `vm_size` parameter is specified. +#' @param entry_script A string representing the relative path to the file used +#' to start training. +#' @param script_params A named list of the command-line arguments to pass to +#' the training script specified in `entry_script`. +#' @param cran_packages A character vector of CRAN packages to be installed. +#' @param github_packages A character vector of GitHub packages to be installed. +#' @param custom_url_packages A character vector of packages to be installed +#' from local directory or custom URL. +#' @param custom_docker_image A string of the name of the Docker image from +#' which the image to use for training will be built. If not set, a default +#' CPU-based image will be used as the base image. To use an image from a +#' private Docker repository, you will also have to specify the +#' `image_registry_details` parameter.
+#' @param image_registry_details A `ContainerRegistry` object of the details of +#' the Docker image registry for the custom Docker image. +#' @param use_gpu Indicates whether the environment to run the experiment should +#' support GPUs. If `TRUE`, a GPU-based default Docker image will be used in the +#' environment. If `FALSE`, a CPU-based image will be used. Default Docker +#' images (CPU or GPU) will only be used if the `custom_docker_image` parameter +#' is not set. +#' @param environment_variables A named list of environment variable names +#' and values. These environment variables are set on the process where the user +#' script is being executed. +#' @param shm_size A string for the size of the Docker container's shared +#' memory block. For more information, see +#' [Docker run reference](https://docs.docker.com/engine/reference/run/). +#' If not set, a default value of `'2g'` is used. +#' @param max_run_duration_seconds An integer of the maximum allowed time for +#' the run. Azure ML will attempt to automatically cancel the run if it takes +#' longer than this value. +#' @param environment The `Environment` object that configures the R +#' environment where the experiment is executed. This parameter is mutually +#' exclusive with the other environment-related parameters `custom_docker_image`, +#' `image_registry_details`, `use_gpu`, `environment_variables`, `shm_size`, +#' `cran_packages`, `github_packages`, and `custom_url_packages`, and if set +#' will take precedence over those parameters. +#' @return The `Estimator` object. +#' @export +#' @seealso +#' `r_environment()`, `container_registry()`, `submit_experiment()` +#' @md +estimator <- function(source_directory, + compute_target = NULL, + vm_size = NULL, + vm_priority = NULL, + entry_script = NULL, + script_params = NULL, + cran_packages = NULL, + github_packages = NULL, + custom_url_packages = NULL, + custom_docker_image = NULL, + image_registry_details = NULL, + use_gpu = FALSE, + environment_variables = NULL, + shm_size = NULL, + max_run_duration_seconds = NULL, + environment = NULL) { + + if (is.null(environment)) { + environment <- r_environment( + name = NULL, + environment_variables = environment_variables, + cran_packages = cran_packages, + github_packages = github_packages, + custom_url_packages = custom_url_packages, + image_registry_details = image_registry_details, + use_gpu = use_gpu, + shm_size = shm_size, + custom_docker_image = custom_docker_image) + } + + est <- azureml$train$estimator$Estimator( + source_directory, + compute_target = compute_target, + vm_size = vm_size, + vm_priority = vm_priority, + entry_script = entry_script, + script_params = script_params, + max_run_duration_seconds = max_run_duration_seconds, + environment_definition = environment) + + run_config <- est$run_config + run_config$framework <- "R" + invisible(est) +} diff --git a/R/experiment.R b/R/experiment.R new file mode 100644 index 00000000..a4c43a5f --- /dev/null +++ b/R/experiment.R @@ -0,0 +1,92 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. + +#' Create an Azure Machine Learning experiment +#' +#' @description +#' An experiment is a grouping of many runs from a specified script. +#' +#' @param workspace The `Workspace` object. +#' @param name A string of the experiment name. The name must be between +#' 3 and 36 characters, start with a letter or number, and can only contain +#' letters, numbers, underscores, and dashes. +#' @return The `Experiment` object.
+#' @export
+#' @section Examples:
+#' ```
+#' ws <- load_workspace_from_config()
+#' exp <- experiment(ws, name = 'myexperiment')
+#' ```
+#' @seealso
+#' `submit_experiment()`
+#' @md
+experiment <- function(workspace, name) {
+  azureml$core$Experiment(workspace, name)
+}
+
+#' Submit an experiment and return the active created run
+#'
+#' @description
+#' `submit_experiment()` is an asynchronous call to the Azure Machine Learning
+#' service to execute a trial on local or remote compute. Depending on the
+#' configuration, `submit_experiment()` will automatically prepare your
+#' execution environments, execute your code, and capture your source code
+#' and results in the experiment's run history.
+#'
+#' To submit an experiment you first need to create a configuration object
+#' describing how the experiment is to be run. The configuration depends on
+#' the type of trial required. For a script run, provide an `Estimator` object
+#' to the `config` parameter. For a HyperDrive run for hyperparameter tuning,
+#' provide a `HyperDriveConfig` to `config`.
+#' @param experiment The `Experiment` object.
+#' @param config The `Estimator` or `HyperDriveConfig` object.
+#' @param tags A named list of tags for the submitted run, e.g.
+#' `list("tag" = "value")`.
+#' @return The `ScriptRun` or `HyperDriveRun` object.
+#' @export
+#' @section Examples:
+#' The following example submits an Estimator experiment.
+#' ```
+#' ws <- load_workspace_from_config()
+#' compute_target <- get_compute(ws, cluster_name = 'mycluster')
+#' exp <- experiment(ws, name = 'myexperiment')
+#' est <- estimator(source_directory = '.',
+#'                  entry_script = 'train.R',
+#'                  compute_target = compute_target)
+#' run <- submit_experiment(exp, est)
+#' ```
+#'
+#' For an example of submitting a HyperDrive experiment, see the
+#' "Examples" section of `hyperdrive_config()`.
+#' @seealso
+#' `estimator()`, `hyperdrive_config()`
+#' @md
+submit_experiment <- function(experiment, config, tags = NULL) {
+  experiment$submit(config, tags = tags)
+}
+
+#' Return a generator of the runs for an experiment
+#'
+#' @description
+#' Return a generator of the runs for an experiment, in reverse
+#' chronological order.
+#' @param experiment The `Experiment` object.
+#' @param type Filter the returned generator of runs by the provided type.
+#' @param tags Filter runs by tags. A named list, e.g.
+#' `list("tag" = "value")`.
+#' @param properties Filter runs by properties. A named list,
+#' e.g. `list("property" = "value")`.
+#' @param include_children By default, fetch only top-level runs.
+#' Set to `TRUE` to list all runs.
+#' @return The generator of runs matching the supplied filters.
+#' @export
+#' @md
+get_runs_in_experiment <- function(experiment,
+                                   type = NULL,
+                                   tags = NULL,
+                                   properties = NULL,
+                                   include_children = FALSE) {
+  experiment$get_runs(type = type,
+                      tags = tags,
+                      properties = properties,
+                      include_children = include_children)
+}
diff --git a/R/hyperdrive.R b/R/hyperdrive.R
new file mode 100644
index 00000000..7a311fb1
--- /dev/null
+++ b/R/hyperdrive.R
@@ -0,0 +1,703 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+### HyperDrive configuration ###
+
+#' Create a configuration for a HyperDrive run
+#'
+#' @description
+#' The HyperDrive configuration includes information about hyperparameter
+#' space sampling, termination policy, primary metric, estimator, and
+#' the compute target to execute the experiment runs on.
+#'
+#' To submit the HyperDrive experiment, pass the `HyperDriveConfig` object
+#' returned from this method to `submit_experiment()`.
+#' @param hyperparameter_sampling The hyperparameter sampling space.
+#' Can be a `RandomParameterSampling`, `GridParameterSampling`, or
+#' `BayesianParameterSampling` object.
+#' @param primary_metric_name A string of the name of the primary metric
+#' reported by the experiment runs.
+#' @param primary_metric_goal The `PrimaryMetricGoal` object. This
+#' parameter determines if the primary metric is to be minimized or
+#' maximized when evaluating runs.
+#' @param max_total_runs An integer of the maximum total number of runs
+#' to create. This is the upper bound; there may be fewer runs when the
+#' sample space is smaller than this value. If both `max_total_runs` and
+#' `max_duration_minutes` are specified, the hyperparameter tuning experiment
+#' terminates when the first of these two thresholds is reached.
+#' @param max_concurrent_runs An integer of the maximum number of runs to
+#' execute concurrently. If `NULL`, all runs are launched in parallel.
+#' The number of concurrent runs is gated on the resources available in the
+#' specified compute target. Hence, you need to ensure that the compute target
+#' has the available resources for the desired concurrency.
+#' @param max_duration_minutes An integer of the maximum duration of the
+#' HyperDrive run. Once this time is exceeded, any runs still executing are
+#' cancelled. If both `max_total_runs` and `max_duration_minutes` are specified,
+#' the hyperparameter tuning experiment terminates when the first of these two
+#' thresholds is reached.
+#' @param policy The early termination policy to use. Can be a
+#' `BanditPolicy`, `MedianStoppingPolicy`, or `TruncationSelectionPolicy`
+#' object. If `NULL` (the default), no early termination policy will be used.
+#'
+#' The `MedianStoppingPolicy` with `delay_evaluation = 5` is a good
+#' termination policy to start with. These are conservative settings that can
+#' provide 25\%-35\% savings with no loss on primary metric
+#' (based on our evaluation data).
+#' @param estimator The `Estimator` object.
+#' @return The `HyperDriveConfig` object.
+#' @export
+#' @section Examples:
+#' ```
+#' # Load the workspace
+#' ws <- load_workspace_from_config()
+#'
+#' # Get the compute target
+#' compute_target <- get_compute(ws, cluster_name = 'mycluster')
+#'
+#' # Define the primary metric goal
+#' goal <- primary_metric_goal("MAXIMIZE")
+#'
+#' # Define the early termination policy
+#' early_termination_policy <- median_stopping_policy(evaluation_interval = 1L,
+#'                                                    delay_evaluation = 5L)
+#'
+#' # Define the hyperparameter search space
+#' param_sampling <- random_parameter_sampling(
+#'     list("batch_size" = choice(c(16L, 32L, 64L, 128L)))
+#' )
+#'
+#' # Create the estimator
+#' est <- estimator(source_directory = '.',
+#'                  entry_script = 'train.R',
+#'                  compute_target = compute_target)
+#'
+#' # Create the HyperDrive configuration
+#' hyperdrive_run_config <- hyperdrive_config(
+#'        hyperparameter_sampling = param_sampling,
+#'        primary_metric_name = 'accuracy',
+#'        primary_metric_goal = goal,
+#'        max_total_runs = 100L,
+#'        max_concurrent_runs = 4L,
+#'        policy = early_termination_policy,
+#'        estimator = est)
+#'
+#' # Submit the HyperDrive experiment
+#' exp <- experiment(ws, name = 'myexperiment')
+#' run <- submit_experiment(exp, hyperdrive_run_config)
+#' ```
+#' @seealso
+#' `submit_experiment()`
+#' @md
+hyperdrive_config <- function(hyperparameter_sampling,
+                              primary_metric_name,
+                              primary_metric_goal,
+                              max_total_runs,
+                              max_concurrent_runs = NULL,
+                              max_duration_minutes = 10080L,
+                              policy = NULL,
+                              estimator = NULL) {
+
+  azureml$train$hyperdrive$HyperDriveConfig(hyperparameter_sampling,
+                                            primary_metric_name,
+                                            primary_metric_goal,
+                                            max_total_runs,
+                                            max_concurrent_runs,
+                                            max_duration_minutes,
+                                            policy, estimator)
+}
+
+### Specifying metric goal ###
+
+#' Define supported metric goals for hyperparameter tuning
+#'
+#' @description
+#' A metric goal is used to determine whether a higher value for a metric
+#' is better or worse. Metric goals are used when comparing runs based on
+#' the primary metric. For example, you may want to maximize accuracy or
+#' minimize error.
+#'
+#' The primary metric name and goal are specified to `hyperdrive_config()`
+#' when you configure a HyperDrive run.
+#' @param goal A string of the metric goal (either "MAXIMIZE" or "MINIMIZE").
+#' @return The `PrimaryMetricGoal` object.
+#' @export
+#' @md
+primary_metric_goal <- function(goal) {
+  azureml$train$hyperdrive$PrimaryMetricGoal(goal)
+}
+
+### Specifying early termination policy ###
+
+#' Define a Bandit policy for early termination of HyperDrive runs
+#'
+#' @description
+#' Bandit is an early termination policy based on slack factor/slack amount
+#' and evaluation interval. The policy early terminates any runs where the
+#' primary metric is not within the specified slack factor/slack amount with
+#' respect to the best performing training run.
+#' @param slack_factor A double of the ratio of the allowed distance from
+#' the best performing run.
+#' @param slack_amount A double of the absolute distance allowed from the
+#' best performing run.
+#' @param evaluation_interval An integer of the frequency for applying the
+#' policy.
+#' @param delay_evaluation An integer of the number of intervals for which to
+#' delay the first evaluation.
+#' @return The `BanditPolicy` object.
+#' @export
+#' @section Details:
+#' The Bandit policy takes the following configuration parameters:
+#' * `slack_factor` or `slack_amount`: The slack allowed with respect to
+#' the best performing training run. `slack_factor` specifies the
+#' allowable slack as a ratio. `slack_amount` specifies the allowable
+#' slack as an absolute amount, instead of a ratio.
+#' * `evaluation_interval`: Optional. The frequency for applying the policy.
+#' Each time the training script logs the primary metric counts as one
+#' interval.
+#' * `delay_evaluation`: Optional. The number of intervals to delay the
+#' policy evaluation. Use this parameter to avoid premature termination
+#' of training runs. If specified, the policy applies every multiple of
+#' `evaluation_interval` that is greater than or equal to `delay_evaluation`.
+#'
+#' Any run that doesn't fall within the slack factor or slack amount of the
+#' evaluation metric with respect to the best performing run will be
+#' terminated.
+#'
+#' Consider a Bandit policy with `slack_factor = 0.2` and
+#' `evaluation_interval = 100`. Assume that run X is the currently best
+#' performing run with an AUC (performance metric) of 0.8 after 100 intervals.
+#' Further, assume the best AUC reported for a run is Y. This policy compares
+#' the value `(Y + Y * 0.2)` to 0.8, and if smaller, cancels the run.
+#' If `delay_evaluation = 200`, then the first time the policy will be applied
+#' is at interval 200.
+#'
+#' Now, consider a Bandit policy with `slack_amount = 0.2` and
+#' `evaluation_interval = 100`. If run 3 is the currently best performing run
+#' with an AUC (performance metric) of 0.8 after 100 intervals, then any run
+#' with an AUC less than 0.6 (`0.8 - 0.2`) after 100 iterations will be
+#' terminated. Similarly, the `delay_evaluation` can also be used to delay the
+#' first termination policy evaluation for a specific number of sequences.
+#' @section Examples:
+#' In this example, the early termination policy is applied at every interval
+#' when metrics are reported, starting at evaluation interval 5. Any run whose
+#' best metric is less than
+#' `1 / (1 + 0.1)` or 91\% of the best performing run will be terminated.
+#' ```
+#' early_termination_policy <- bandit_policy(slack_factor = 0.1,
+#'                                           evaluation_interval = 1L,
+#'                                           delay_evaluation = 5L)
+#' ```
+#' @md
+bandit_policy <- function(slack_factor = NULL,
+                          slack_amount = NULL,
+                          evaluation_interval = 1L,
+                          delay_evaluation = 0L) {
+  azureml$train$hyperdrive$BanditPolicy(evaluation_interval,
+                                        slack_factor,
+                                        slack_amount,
+                                        delay_evaluation)
+}
+
+#' Define a median stopping policy for early termination of HyperDrive runs
+#'
+#' @description
+#' Median stopping is an early termination policy based on running averages of
+#' primary metrics reported by the runs. This policy computes running averages
+#' across all training runs and terminates runs whose performance is worse than
+#' the median of the running averages. Specifically, a run will be canceled at
+#' interval N if its best primary metric reported up to interval N is worse than
+#' the median of the running averages for intervals 1:N across all runs.
+#' @param evaluation_interval An integer of the frequency for applying the
+#' policy.
+#' @param delay_evaluation An integer of the number of intervals for which to
+#' delay the first evaluation.
+#' @return The `MedianStoppingPolicy` object.
+#' @export
+#' @section Details:
+#' The median stopping policy takes the following optional configuration
+#' parameters:
+#' * `evaluation_interval`: Optional. The frequency for applying the policy.
+#' Each time the training script logs the primary metric counts as one
+#' interval.
+#' * `delay_evaluation`: Optional. The number of intervals to delay the
+#' policy evaluation. Use this parameter to avoid premature termination
+#' of training runs.
+#' If specified, the policy applies every multiple of
+#' `evaluation_interval` that is greater than or equal to `delay_evaluation`.
+#'
+#' This policy is inspired by the research publication
+#' [Google Vizier: A Service for Black-Box Optimization](https://ai.google/research/pubs/pub46180).
+#'
+#' If you are looking for a conservative policy that provides savings without
+#' terminating promising jobs, you can use a `MedianStoppingPolicy` with
+#' `evaluation_interval = 1` and `delay_evaluation = 5`. These are conservative
+#' settings that can provide approximately 25\%-35\% savings with no loss on
+#' the primary metric (based on our evaluation data).
+#' @section Examples:
+#' In this example, the early termination policy is applied at every
+#' interval starting at evaluation interval 5. A run will be terminated at
+#' interval 5 if its best primary metric is worse than the median of the
+#' running averages over intervals 1:5 across all training runs.
+#' ```
+#' early_termination_policy <- median_stopping_policy(evaluation_interval = 1L,
+#'                                                    delay_evaluation = 5L)
+#' ```
+#' @md
+median_stopping_policy <- function(evaluation_interval = 1L,
+                                   delay_evaluation = 0L) {
+  azureml$train$hyperdrive$MedianStoppingPolicy(evaluation_interval,
+                                                delay_evaluation)
+}
+
+#' Define a truncation selection policy for early termination of HyperDrive runs
+#'
+#' @description
+#' Truncation selection cancels a given percentage of lowest performing runs at
+#' each evaluation interval. Runs are compared based on their performance on the
+#' primary metric and the lowest X\% are terminated.
+#'
+#' @param truncation_percentage An integer of the percentage of lowest
+#' performing runs to terminate at each interval.
+#' @param evaluation_interval An integer of the frequency for applying the
+#' policy.
+#' @param delay_evaluation An integer of the number of intervals for which to
+#' delay the first evaluation.
+#' @return The `TruncationSelectionPolicy` object.
+#' @export
+#' @section Details:
+#' This policy periodically cancels the given percentage of runs that rank the
+#' lowest for their performance on the primary metric. The policy strives for
+#' fairness in ranking the runs by accounting for improving model performance
+#' with training time. When ranking a relatively young run, the policy uses the
+#' corresponding (and earlier) performance of older runs for comparison.
+#' Therefore, runs aren't terminated for having a lower performance because they
+#' have run for less time than other runs.
+#'
+#' The truncation selection policy takes the following configuration parameters:
+#' * `truncation_percentage`: An integer of the percentage of lowest performing
+#' runs to terminate at each evaluation interval.
+#' * `evaluation_interval`: Optional. The frequency for applying the policy.
+#' Each time the training script logs the primary metric counts as one
+#' interval.
+#' * `delay_evaluation`: Optional. The number of intervals to delay the
+#' policy evaluation. Use this parameter to avoid premature termination
+#' of training runs. If specified, the policy applies every multiple of
+#' `evaluation_interval` that is greater than or equal to `delay_evaluation`.
+#'
+#' For example, when evaluating a run at interval N, its performance is only
+#' compared with the performance of other runs up to interval N even if they
+#' reported metrics for intervals greater than N.
+#' @section Examples:
+#' In this example, the early termination policy is applied at every interval
+#' starting at evaluation interval 5. A run will be terminated at interval 5
+#' if its performance at interval 5 is in the lowest 20\% of performance of all
+#' runs at interval 5.
+#' ```
+#' early_termination_policy <- truncation_selection_policy(
+#'                                   truncation_percentage = 20L,
+#'                                   evaluation_interval = 1L,
+#'                                   delay_evaluation = 5L)
+#' ```
+#' @md
+truncation_selection_policy <- function(truncation_percentage,
+                                        evaluation_interval = 1L,
+                                        delay_evaluation = 0L) {
+  azureml$train$hyperdrive$TruncationSelectionPolicy(truncation_percentage,
+                                                     evaluation_interval,
+                                                     delay_evaluation)
+}
+
+### Specifying sampling space ###
+
+#' Define random sampling over a hyperparameter search space
+#'
+#' @description
+#' In random sampling, hyperparameter values are randomly selected from the
+#' defined search space. Random sampling allows the search space to include
+#' both discrete and continuous hyperparameters.
+#' @param parameter_space A named list containing each parameter and its
+#' distribution, e.g. `list("parameter" = distribution)`.
+#' @param properties A named list of additional properties for the algorithm.
+#' @return The `RandomParameterSampling` object.
+#' @export
+#' @section Details:
+#' In this sampling algorithm, parameter values are chosen from a set of
+#' discrete values or a distribution over a continuous range. Functions you can
+#' use include:
+#' `choice()`, `randint()`, `uniform()`, `quniform()`, `loguniform()`,
+#' `qloguniform()`, `normal()`, `qnormal()`, `lognormal()`, and `qlognormal()`.
+#' @section Examples:
+#' ```
+#' param_sampling <- random_parameter_sampling(list(
+#'     "learning_rate" = normal(10, 3),
+#'     "keep_probability" = uniform(0.05, 0.1),
+#'     "batch_size" = choice(c(16L, 32L, 64L, 128L))
+#' ))
+#' ```
+#' @seealso
+#' `choice()`, `randint()`, `uniform()`, `quniform()`, `loguniform()`,
+#' `qloguniform()`, `normal()`, `qnormal()`, `lognormal()`, `qlognormal()`
+#' @md
+random_parameter_sampling <- function(parameter_space, properties = NULL) {
+  azureml$train$hyperdrive$RandomParameterSampling(parameter_space, properties)
+}
+
+#' Define grid sampling over a hyperparameter search space
+#'
+#' @description
+#' Grid sampling performs a simple grid search over all feasible values in
+#' the defined search space. It can only be used with hyperparameters
+#' specified using `choice()`.
+#' @param parameter_space A named list containing each parameter and its
+#' distribution, e.g. `list("parameter" = distribution)`.
+#' @return The `GridParameterSampling` object.
+#' @export
+#' @section Examples:
+#' ```
+#' param_sampling <- grid_parameter_sampling(list(
+#'     "num_hidden_layers" = choice(c(1L, 2L, 3L)),
+#'     "batch_size" = choice(c(16L, 32L))
+#' ))
+#' ```
+#' @seealso
+#' `choice()`
+#' @md
+grid_parameter_sampling <- function(parameter_space) {
+  azureml$train$hyperdrive$GridParameterSampling(parameter_space)
+}
+
+#' Define Bayesian sampling over a hyperparameter search space
+#'
+#' @description
+#' Bayesian sampling is based on the Bayesian optimization algorithm and makes
+#' intelligent choices on the hyperparameter values to sample next. It picks
+#' the sample based on how the previous samples performed, such that the new
+#' sample improves the reported primary metric.
+#' @param parameter_space A named list containing each parameter and its
+#' distribution, e.g. `list("parameter" = distribution)`.
+#' @return The `BayesianParameterSampling` object.
+#' @export
+#' @section Details:
+#' When you use Bayesian sampling, the number of concurrent runs has an impact
+#' on the effectiveness of the tuning process. Typically, a smaller number of
+#' concurrent runs can lead to better sampling convergence, since the smaller
+#' degree of parallelism increases the number of runs that benefit from
+#' previously completed runs.
+#'
+#' Bayesian sampling only supports `choice()`, `uniform()`, and `quniform()`
+#' distributions over the search space.
+#'
+#' Bayesian sampling does not support any early termination policy. When
+#' using Bayesian parameter sampling, `early_termination_policy` must be
+#' `NULL`.
+#' @section Examples:
+#' ```
+#' param_sampling <- bayesian_parameter_sampling(list(
+#'     "learning_rate" = uniform(0.05, 0.1),
+#'     "batch_size" = choice(c(16L, 32L, 64L, 128L))
+#' ))
+#' ```
+#' @seealso
+#' `choice()`, `uniform()`, `quniform()`
+#' @md
+bayesian_parameter_sampling <- function(parameter_space) {
+  azureml$train$hyperdrive$BayesianParameterSampling(parameter_space)
+}
+
+### Parameter expressions for describing search space ###
+
+#' Specify a discrete set of options to sample from
+#'
+#' @description
+#' Specify a discrete set of options to sample the hyperparameters
+#' from.
+#' @param options A list of discrete values to choose from, or
+#' one or more comma-separated discrete values to choose from.
+#' @return A list of the stochastic expression.
+#' @export
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @md
+choice <- function(options) {
+  azureml$train$hyperdrive$choice(options)
+}
+
+#' Specify a set of random integers in the range `[0, upper)`
+#'
+#' @description
+#' Specify a set of random integers in the range `[0, upper)`
+#' to sample the hyperparameters from.
+#'
+#' The semantics of this distribution is that the loss function is no more
+#' correlated between nearby integer values than between more distant integer
+#' values. This is an
+#' appropriate distribution for describing random seeds, for example.
+#' If the loss function is likely to be more correlated for nearby integer
+#' values, then you should use one of the "quantized" continuous
+#' distributions, such as either `quniform()`, `qloguniform()`, `qnormal()`,
+#' or `qlognormal()`.
+#' @param upper An integer of the upper bound for the range of
+#' integers (exclusive).
+#' @return A list of the stochastic expression.
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @export
+#' @md
+randint <- function(upper) {
+  azureml$train$hyperdrive$randint(upper)
+}
+
+#' Specify a uniform distribution of options to sample from
+#'
+#' @description
+#' Specify a uniform distribution of options to sample the
+#' hyperparameters from.
+#' @param min_value A double of the minimum value in the range
+#' (inclusive).
+#' @param max_value A double of the maximum value in the range
+#' (inclusive).
+#' @return A list of the stochastic expression.
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @export
+#' @md
+uniform <- function(min_value, max_value) {
+  azureml$train$hyperdrive$uniform(min_value, max_value)
+}
+
+#' Specify a uniform distribution of the form
+#' `round(uniform(min_value, max_value) / q) * q`
+#'
+#' @description
+#' Specify a uniform distribution of the form
+#' `round(uniform(min_value, max_value) / q) * q`.
+#'
+#' This is suitable for a discrete value with respect to which the objective
+#' is still somewhat "smooth", but which should be bounded both above and below.
+#' @param min_value A double of the minimum value in the range (inclusive).
+#' @param max_value A double of the maximum value in the range (inclusive).
+#' @param q An integer of the smoothing factor.
+#' @return A list of the stochastic expression.
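+#' @section Examples:
+#' A brief sketch of how this expression might appear in a search space
+#' (the parameter name and values are illustrative):
+#' ```
+#' param_sampling <- random_parameter_sampling(
+#'     list("num_units" = quniform(32, 512, 32L))
+#' )
+#' ```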
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @export
+#' @md
+quniform <- function(min_value, max_value, q) {
+  azureml$train$hyperdrive$quniform(min_value, max_value, q)
+}
+
+#' Specify a log uniform distribution
+#'
+#' @description
+#' Specify a log uniform distribution.
+#'
+#' A value is drawn according to `exp(uniform(min_value, max_value))` so that
+#' the logarithm of the return value is uniformly distributed. When optimizing,
+#' this variable is constrained to the interval
+#' `[exp(min_value), exp(max_value)]`.
+#' @param min_value A double where the minimum value in the range will be
+#' `exp(min_value)` (inclusive).
+#' @param max_value A double where the maximum value in the range will be
+#' `exp(max_value)` (inclusive).
+#' @return A list of the stochastic expression.
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @export
+#' @md
+loguniform <- function(min_value, max_value) {
+  azureml$train$hyperdrive$loguniform(min_value, max_value)
+}
+
+#' Specify a uniform distribution of the form
+#' `round(exp(uniform(min_value, max_value)) / q) * q`
+#'
+#' @description
+#' Specify a uniform distribution of the form
+#' `round(exp(uniform(min_value, max_value)) / q) * q`.
+#'
+#' This is suitable for a discrete variable with respect to which the objective
+#' is "smooth", and gets smoother with the size of the value, but which should
+#' be bounded both above and below.
+#' @param min_value A double of the minimum value in the range (inclusive).
+#' @param max_value A double of the maximum value in the range (inclusive).
+#' @param q An integer of the smoothing factor.
+#' @return A list of the stochastic expression.
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @export
+#' @md
+qloguniform <- function(min_value, max_value, q) {
+  azureml$train$hyperdrive$qloguniform(min_value, max_value, q)
+}
+
+#' Specify a real value that is normally-distributed with mean `mu` and standard
+#' deviation `sigma`
+#'
+#' @description
+#' Specify a real value that is normally-distributed with mean `mu` and
+#' standard deviation `sigma`.
+#'
+#' When optimizing, this is an unconstrained variable.
+#' @param mu A double of the mean of the normal distribution.
+#' @param sigma A double of the standard deviation of the normal distribution.
+#' @return A list of the stochastic expression.
+#' @seealso
+#' `random_parameter_sampling()`, `grid_parameter_sampling()`,
+#' `bayesian_parameter_sampling()`
+#' @export
+#' @md
+normal <- function(mu, sigma) {
+  azureml$train$hyperdrive$normal(mu, sigma)
+}
+
+#' Specify a normal distribution of the form `round(normal(mu, sigma) / q) * q`
+#'
+#' @description
+#' Specify a normal distribution of the form `round(normal(mu, sigma) / q) * q`.
+#'
+#' Suitable for a discrete variable that probably takes a value around `mu`,
+#' but is fundamentally unbounded.
+#' @param mu A double of the mean of the normal distribution.
+#' @param sigma A double of the standard deviation of the normal distribution. +#' @param q An integer of the smoothing factor. +#' @return A list of the stochastic expression. +#' @seealso +#' `random_parameter_sampling()`, `grid_parameter_sampling()`, +#' `bayesian_parameter_sampling()` +#' @export +#' @md +qnormal <- function(mu, sigma, q) { + azureml$train$hyperdrive$qnormal(mu, sigma, q) +} + +#' Specify a normal distribution of the form `exp(normal(mu, sigma))` +#' +#' @description +#' Specify a normal distribution of the form `exp(normal(mu, sigma))`. +#' +#' The logarithm of the return value is normally distributed. When optimizing, +#' this variable is constrained to be positive. +#' @param mu A double of the mean of the normal distribution. +#' @param sigma A double of the standard deviation of the normal distribution. +#' @return A list of the stochastic expression. +#' @seealso +#' `random_parameter_sampling()`, `grid_parameter_sampling()`, +#' `bayesian_parameter_sampling()` +#' @export +#' @md +lognormal <- function(mu, sigma) { + azureml$train$hyperdrive$lognormal(mu, sigma) +} + +#' Specify a normal distribution of the form +#' `round(exp(normal(mu, sigma)) / q) * q` +#' +#' @description +#' Specify a normal distribution of the form +#' `round(exp(normal(mu, sigma)) / q) * q`. +#' +#' Suitable for a discrete variable with respect to which the objective is +#' smooth and gets smoother with the size of the variable, which is bounded +#' from one side. +#' @param mu A double of the mean of the normal distribution. +#' @param sigma A double of the standard deviation of the normal distribution. +#' @param q An integer of the smoothing factor. +#' @return A list of the stochastic expression. +#' @seealso +#' `random_parameter_sampling()`, `grid_parameter_sampling()`, +#' `bayesian_parameter_sampling()` +#' @export +#' @md +qlognormal <- function(mu, sigma, q) { + azureml$train$hyperdrive$qlognormal(mu, sigma, q) +} + +### Retrieving run metrics ### + +#' Return the best performing run amongst all completed runs +#' +#' @description +#' Find and return the run that corresponds to the best performing run +#' amongst all the completed runs. +#' +#' The best performing run is identified solely based on the primary metric +#' parameter specified in the `HyperDriveConfig` (`primary_metric_name`). +#' The `PrimaryMetricGoal` governs whether the minimum or maximum of the +#' primary metric is used. To do a more detailed analysis of all the +#' run metrics launched by this HyperDrive run, use `get_child_run_metrics()`. +#' Only one of the runs is returned from `get_best_run_by_primary_metric()`, +#' even if several of the runs launched by this HyperDrive run reached +#' the same best metric. +#' @param hyperdrive_run The `HyperDriveRun` object. +#' @param include_failed If `TRUE`, include the failed runs. +#' @param include_canceled If `TRUE`, include the canceled runs. +#' @return The `Run` object. +#' @export +#' @md +get_best_run_by_primary_metric <- function(hyperdrive_run, + include_failed = TRUE, + include_canceled = TRUE) { + hyperdrive_run$get_best_run_by_primary_metric(include_failed, + include_canceled) +} + +#' Get the child runs sorted in descending order by +#' best primary metric +#' +#' @description +#' Return a list of child runs of the HyperDrive run sorted by their best +#' primary metric. The sorting is done according to the primary metric and +#' its goal: if it is maximize, then the child runs are returned in descending +#' order of their best primary metric. 
If `reverse = TRUE`, the order is +#' reversed. Each child in the result has run id, hyperparameters, best primary +#' metric value, and status. +#' +#' Child runs without the primary metric are discarded when +#' `discard_no_metric = TRUE`. Otherwise, they are appended to the list behind +#' other child runs with the primary metric. Note that the reverse option has no +#' impact on them. +#' @param hyperdrive_run The `HyperDriveRun` object. +#' @param top An integer of the number of top child runs to be returned. If `0` +#' (the default value), all child runs will be returned. +#' @param reverse If `TRUE`, the order will be reversed. This sorting only +#' impacts child runs with the primary metric. +#' @param discard_no_metric If `FALSE`, child runs without the primary metric +#' will be appended to the list returned. +#' @return The named list of child runs. +#' @export +#' @md +get_child_runs_sorted_by_primary_metric <- function(hyperdrive_run, + top = 0L, + reverse = FALSE, + discard_no_metric = FALSE) { + hyperdrive_run$get_children_sorted_by_primary_metric(top, reverse, + discard_no_metric) +} + +#' Get the hyperparameters for all child runs +#' +#' @description +#' Return the hyperparameters for all the child runs of the +#' HyperDrive run. +#' @param hyperdrive_run The `HyperDriveRun` object. +#' @return The named list of hyperparameters where element name +#' is the run_id, e.g. `list("run_id" = hyperparameters)`. +#' @export +#' @md +get_child_run_hyperparameters <- function(hyperdrive_run) { + hyperdrive_run$get_hyperparameters() +} + +#' Get the metrics from all child runs +#' +#' @description +#' Return the metrics from all the child runs of the +#' HyperDrive run. +#' @param hyperdrive_run The `HyperDriveRun` object. +#' @return The named list of metrics where element name is +#' the run_id, e.g. `list("run_id" = metrics)`. +#' @export +#' @md +get_child_run_metrics <- function(hyperdrive_run) { + hyperdrive_run$get_metrics() +} diff --git a/R/install.R b/R/install.R new file mode 100644 index 00000000..9bf69397 --- /dev/null +++ b/R/install.R @@ -0,0 +1,55 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. 
+
+#' Install azureml sdk package
+#' @param version azureml sdk package version to install; if `NULL`
+#' (the default), the latest version is installed
+#' @param envname name of the conda environment to create
+#' @param conda_python_version version of Python to use in the conda
+#' environment
+#' @export
+install_azureml <- function(version = NULL,
+                            envname = "r-azureml",
+                            conda_python_version = "3.6") {
+  main_package <- "azureml-sdk"
+  default_packages <- c("numpy")
+
+  # set version if provided
+  if (!is.null(version)) {
+    main_package <- paste(main_package, "==", version, sep = "")
+  }
+
+  # check for anaconda installation
+  if (is.null(reticulate::conda_binary())) {
+    stop("Anaconda not installed or not in system path.")
+  }
+
+  # remove conda environment if it already exists
+  envs <- reticulate::conda_list()
+  if (envname %in% envs$name) {
+    msg <- sprintf(paste("Environment \"%s\" already exists.",
+                         "Removing the environment..."),
+                   envname)
+    message(msg)
+    reticulate::conda_remove(envname)
+  }
+
+  # create conda environment
+  msg <- paste("Creating environment: ", envname)
+  message(msg)
+  py_version <- paste("python=", conda_python_version, sep = "")
+  reticulate::conda_create(envname, packages = py_version)
+
+  # install packages
+  reticulate::py_install(
+    packages = c(main_package, default_packages),
+    envname = envname,
+    method = "conda",
+    conda = "auto",
+    pip = TRUE)
+
+  cat("\nInstallation complete.\n\n")
+
+  if (rstudioapi::isAvailable() && rstudioapi::hasFun("restartSession"))
+    rstudioapi::restartSession()
+
+  invisible(NULL)
+}
diff --git a/R/keyvault.R b/R/keyvault.R
new file mode 100644
index 00000000..ac634af4
--- /dev/null
+++ b/R/keyvault.R
@@ -0,0 +1,70 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Add secrets to a keyvault
+#'
+#' @description
+#' Add a named list of secrets into the keyvault associated with the
+#' workspace.
+#' @param keyvault The `Keyvault` object.
+#' @param secrets The named list of secrets to be added to the keyvault,
+#' where element name corresponds to the secret name.
+#' @export
+#' @section Examples:
+#' ```
+#' ws <- load_workspace_from_config()
+#' my_secret <- Sys.getenv("MY_SECRET")
+#' keyvault <- get_default_keyvault(ws)
+#' set_secrets(keyvault, list("mysecret" = my_secret))
+#' ```
+#' @md
+set_secrets <- function(keyvault, secrets) {
+  keyvault$set_secrets(secrets)
+  invisible(NULL)
+}
+
+#' Get secrets from a keyvault
+#'
+#' @description
+#' Returns the secret values from the keyvault associated with the
+#' workspace for a given set of secret names. For runs submitted using
+#' `submit_experiment()`, you can use `get_secrets_from_run()` instead,
+#' as that method shortcuts workspace instantiation (since a submitted
+#' run is aware of its workspace).
+#' @param keyvault The `Keyvault` object.
+#' @param secrets A vector of secret names.
+#' @return A named list of found and not found secrets, where element
+#' name corresponds to the secret name. If a secret was not found, the
+#' corresponding element will be `NULL`.
+#' @export
+#' @md
+get_secrets <- function(keyvault, secrets) {
+  keyvault$get_secrets(secrets)
+}
+
+#' Delete secrets from a keyvault
+#'
+#' @description
+#' Delete secrets from the keyvault associated with the workspace for
+#' a specified set of secret names.
+#' @param keyvault The `Keyvault` object.
+#' @param secrets A vector of secret names.
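+#' @section Examples:
+#' A minimal sketch (it assumes a secret named `"mysecret"` was previously
+#' added to the workspace keyvault):
+#' ```
+#' ws <- load_workspace_from_config()
+#' keyvault <- get_default_keyvault(ws)
+#' delete_secrets(keyvault, secrets = c("mysecret"))
+#' ```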
+#' @export
+#' @md
+delete_secrets <- function(keyvault, secrets) {
+  keyvault$delete_secrets(secrets)
+  invisible(NULL)
+}
+
+#' List the secrets in a keyvault
+#'
+#' @description
+#' Returns the list of secret names for all the secrets in the keyvault
+#' associated with the workspace.
+#' @param keyvault The `Keyvault` object.
+#' @return A list of secret names.
+#' @export
+#' @md
+list_secrets <- function(keyvault) {
+  keyvault$list_secrets()
+}
diff --git a/R/model.R b/R/model.R
new file mode 100644
index 00000000..0b200359
--- /dev/null
+++ b/R/model.R
@@ -0,0 +1,274 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Retrieve the Model object from the cloud.
+#' @param workspace The workspace object containing the Model to retrieve
+#' @param name Will retrieve the latest model with the corresponding name, if
+#' it exists
+#' @param id Will retrieve the model with the corresponding ID, if it exists
+#' @param tags Optional, will filter based on the provided list, searching by
+#' either 'key' or '[key, value]'.
+#' @param properties Optional, will filter based on the provided list,
+#' searching by either 'key' or '[key, value]'.
+#' @param version When provided along with name, will get the specific version
+#' of the specified named model, if it exists
+#' @param run_id Optional, will filter based on the provided ID.
+#' @return A model object, if one is found in the provided workspace
+#' @export
+get_model <- function(workspace,
+                      name = NULL,
+                      id = NULL,
+                      tags = NULL,
+                      properties = NULL,
+                      version = NULL,
+                      run_id = NULL) {
+  model <- azureml$core$Model(workspace,
+                              name,
+                              id,
+                              tags,
+                              properties,
+                              version,
+                              run_id)
+  invisible(model)
+}
+
+#' Register a model with the provided workspace.
+#' @param workspace The workspace to register the model under
+#' @param model_path String which points to the path on the local file system
+#' where the model assets are located. This can be a direct pointer to a single
+#' file or folder. If pointing to a folder, the child_paths parameter can be
+#' used to specify individual files to bundle together as the Model object,
+#' as opposed to using the entire contents of the folder.
+#' @param model_name The name to register the model with
+#' @param tags Dictionary of key value tags to give the model
+#' @param properties Dictionary of key value properties to give the model.
+#' These properties cannot be changed after model creation, however new key
+#' value pairs can be added
+#' @param description A text description of the model
+#' @param child_paths If provided in conjunction with a model_path to a folder,
+#' only the specified files will be bundled into the Model object.
+#' @return The registered Model object
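+#' @section Examples:
+#' A minimal sketch (it assumes a serialized model file `model.rds` exists
+#' in the current directory; the file and model names are illustrative):
+#' ```
+#' ws <- load_workspace_from_config()
+#' model <- register_model(ws,
+#'                         model_path = "model.rds",
+#'                         model_name = "mymodel")
+#' ```
+#' @md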
+#' @export
+register_model <- function(workspace,
+                           model_path,
+                           model_name,
+                           tags = NULL,
+                           properties = NULL,
+                           description = NULL,
+                           child_paths = NULL) {
+  model <- azureml$core$Model$register(workspace,
+                                       model_path,
+                                       model_name,
+                                       tags = tags,
+                                       properties = properties,
+                                       description = description,
+                                       child_paths = child_paths)
+  invisible(model)
+}
+
+#' Download a model to target_dir of the local file system.
+#' @param model The model to download
+#' @param target_dir Path to directory for where to download the model.
+#' Defaults to "."
+#' @param exist_ok Boolean to replace downloaded dir/files if they exist.
+#' Defaults to FALSE
+#' @return string path to the file or folder of the model
+#' @export
+download_model <- function(model, target_dir = ".", exist_ok = FALSE) {
+  model_path <- model$download(target_dir, exist_ok)
+  invisible(model_path)
+}
+
+#' Convert this Model into a json serialized dictionary
+#' @param model The model to serialize
+#' @return The json representation of this Model
+serialize_model <- function(model) {
+  result <- model$serialize()
+  invisible(result)
+}
+
+#' Convert a json object into a Model object.
+#' @param workspace The workspace object the model is registered under
+#' @param model_payload A json object to convert to a Model object
+#' @return The Model representation of the provided json object
+deserialize_to_model <- function(workspace, model_payload) {
+  model <- azureml$core$Model$deserialize(workspace, model_payload)
+  invisible(model)
+}
+
+#' Delete this model from its associated workspace.
+#' @param model The model to delete
+#' @export
+delete_model <- function(model) {
+  model$delete()
+}
+
+#' Deploy a Webservice from zero or more model objects.
+#' @param workspace A Workspace object to associate the Webservice with
+#' @param name The name to give the deployed service. Must be unique to the
+#' workspace, only consist of lowercase letters, numbers, or dashes, start with
+#' a letter, and be between 3 and 32 characters long.
+#' @param models A list of model objects. Can be an empty list.
+#' @param inference_config An InferenceConfig object used to determine required
+#' model properties.
+#' @param deployment_config A WebserviceDeploymentConfiguration used to
+#' configure the webservice. If one is not provided, an empty configuration
+#' object will be used based on the desired target.
+#' @param deployment_target An azureml.core.ComputeTarget to deploy the
+#' Webservice to. As Azure Container Instances has no associated
+#' azureml.core.ComputeTarget, leave this parameter as `NULL` to deploy to
+#' Azure Container Instances.
+#' @return A Webservice object corresponding to the deployed webservice
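+#' @section Examples:
+#' A minimal sketch of deploying to Azure Container Instances (it assumes a
+#' registered model `model` and a `score.R` entry script; the names and
+#' resource values are illustrative):
+#' ```
+#' ws <- load_workspace_from_config()
+#' config <- inference_config(entry_script = 'score.R')
+#' deployment_config <- aci_webservice_deployment_config(cpu_cores = 1,
+#'                                                       memory_gb = 1)
+#' service <- deploy_model(ws,
+#'                         name = 'my-webservice',
+#'                         models = list(model),
+#'                         inference_config = config,
+#'                         deployment_config = deployment_config)
+#' ```
+#' @md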
+#' @export
+deploy_model <- function(workspace,
+                         name,
+                         models,
+                         inference_config,
+                         deployment_config = NULL,
+                         deployment_target = NULL) {
+  webservice <- azureml$core$Model$deploy(workspace,
+                                          name,
+                                          models,
+                                          inference_config,
+                                          deployment_config,
+                                          deployment_target)
+  invisible(webservice)
+}
+
+#' Create a model package in the form of a Docker image or Dockerfile build
+#' context
+#' @param workspace The workspace in which to create the package.
+#' @param models A list of Model objects to include in the package. Can be an
+#' empty list.
+#' @param inference_config An InferenceConfig object to configure the
+#' operation of the models. This must include an Environment object.
+#' @param generate_dockerfile Whether to create a Dockerfile that can be run
+#' locally instead of building an image.
+#' @return A ModelPackage object.
+#' @export
+package_model <- function(workspace,
+                          models,
+                          inference_config,
+                          generate_dockerfile = FALSE) {
+  model_package <- azureml$core$Model$package(workspace,
+                                              models,
+                                              inference_config,
+                                              generate_dockerfile)
+  invisible(model_package)
+}
+
+#' Return a ContainerRegistry object for where the image
+#' (or base image, for Dockerfile packages) is stored.
+#' @param package Package created with model(s) and dependencies.
+#' @return ContainerRegistry object
+#' @export
+get_model_package_container_registry <- function(package) {
+  package$get_container_registry()
+}
+
+#' Retrieve the package creation logs.
+#' @param package Package created with model(s) and dependencies.
+#' @param decode Whether to decode the raw log bytes to a string.
+#' @param offset Byte offset from which to start reading the logs.
+#' @return Package creation logs.
+#' @export
+get_model_package_creation_logs <- function(package,
+                                            decode = TRUE,
+                                            offset = 0) {
+  package$get_logs(decode, offset)
+}
+
+#' Pull the package output to the local machine.
+#' This can only be used with a Docker image package.
+#' @param package Package created with model(s) and dependencies.
+#' @export
+pull_model_package_image <- function(package) {
+  package$pull()
+}
+
+#' Save the package output to a local directory.
+#' This can only be used with a Dockerfile package.
+#' @param package Package created with model(s) and dependencies.
+#' @param output_directory Local directory that will be created to contain
+#' the contents of the package.
+#' @export
+save_model_package_files <- function(package, output_directory) {
+  package$save(output_directory)
+}
+
+#' Wait for the package to finish creating.
+#' @param package Package created with model(s) and dependencies.
+#' @param show_output Boolean option to print more verbose output. Defaults to
+#' FALSE.
+#' @export
+wait_for_model_package_creation <- function(package, show_output = FALSE) {
+  package$wait_for_creation(show_output)
+}
+
+#' Create the model deployment config specific to model deployments.
+#' @param entry_script Path to the local file that contains the code to run for
+#' the image.
+#' @param source_directory Path to the folder that contains all files needed to
+#' create the image.
+#' @param description A description to give this image.
+#' @param environment An environment object to use for the deployment.
+#' Doesn't have to be registered. A user should provide either this, or the
+#' other parameters, not both. The individual parameters will NOT serve
+#' as an override for the environment object. Exceptions include
+#' `entry_script`, `source_directory`, and `description`.
+#' @return An InferenceConfig object
+#' @export
+inference_config <- function(entry_script,
+                             source_directory = NULL,
+                             description = NULL,
+                             environment = NULL) {
+  generate_score_python_wrapper(entry_script, source_directory)
+  if (!is.null(environment)) {
+    environment$inferencing_stack_version <- "latest"
+  }
+
+  inference_config <- azureml$core$model$InferenceConfig(
+    entry_script = "_generated_score.py",
+    source_directory = source_directory,
+    description = description,
+    environment = environment)
+
+  invisible(inference_config)
+}
+
+#' Generate the `_generated_score.py` wrapper file for the corresponding
+#' entry_script file
+#' @param entry_script Path to the local file that contains the code to run for
+#' the image.
+#' @param source_directory Path to the folder that contains all files needed to
+#' create the image.
+generate_score_python_wrapper <- function(entry_script, source_directory) {
+  score_py_template <- sprintf("# This is an auto-generated Python wrapper.
+import rpy2.robjects as robjects
+import os
+import json
+
+def init():
+    global r_run
+
+    score_r_path = os.path.join(os.path.dirname(
+                                os.path.realpath(__file__)),
+                                \"%s\")
+
+    # handle path for windows os
+    score_r_path = score_r_path.replace('\\\\', '/')
+    robjects.r.source(\"{}\".format(score_r_path))
+    r_run = robjects.r['init']()
+
+def run(input_data):
+    dataR = r_run(input_data)[0]
+    return json.loads(dataR)",
+                               entry_script)
+
+  if (is.null(source_directory))
+    source_directory <- "."
+  score_py_file_path <- file.path(source_directory, "_generated_score.py")
+  py_file <- file(score_py_file_path, open = "w")
+  writeLines(score_py_template, py_file)
+  close(py_file)
+  invisible(NULL)
+}
diff --git a/R/modules.R b/R/modules.R
new file mode 100644
index 00000000..6c877e12
--- /dev/null
+++ b/R/modules.R
@@ -0,0 +1,8 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' azureml module
+#' Users can access functions/modules in azureml that are not exposed through
+#' the exported R functions.
+#' @export
+azureml <- NULL
diff --git a/R/package.R b/R/package.R
new file mode 100644
index 00000000..33a5c8b6
--- /dev/null
+++ b/R/package.R
@@ -0,0 +1,37 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' @importFrom reticulate import use_condaenv py_str
+
+.onLoad <- function(libname, pkgname) {
+  use_condaenv("r-azureml")
+
+  # delay load azureml
+  azureml <<- import("azureml", delay_load = list(
+    environment = "r-azureml",
+
+    on_load = function() {
+      # This function will be called on successful load
+      ver <- toString(utils::packageVersion("azureml"))
+      azureml$"_base_sdk_common"$user_agent$append("azureml-r-sdk", ver)
+    },
+
+    on_error = function(e) {
+      if (grepl("No module named azureml", e$message)) {
+        stop("Use azureml::install_azureml() to install azureml python ",
+             call. = FALSE)
+      } else {
+        stop(e$message, call. = FALSE)
+      }
+    }
+  ))
+
+  # for solving the login hang issue on rstudio server
+  if (grepl("rstudio-server", Sys.getenv("RS_RPOSTBACK_PATH"))) {
+    webbrowser <- reticulate::import("webbrowser")
+    # this will force the use of device code login
+    webbrowser$"_tryorder" <- list()
+  }
+
+  invisible(NULL)
+}
diff --git a/R/run.R b/R/run.R
new file mode 100644
index 00000000..c915937c
--- /dev/null
+++ b/R/run.R
@@ -0,0 +1,321 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Get the metrics for a run
+#' @param run Run object
+#' @return named list containing metrics associated with the run.
+#' @export
+get_run_metrics <- function(run) {
+  run$get_metrics()
+}
+
+#' Wait for the completion of this run
+#' @param run Run object
+#' @param show_output print verbose output to console
+#' @export
+wait_for_run_completion <- function(run, show_output = TRUE) {
+  tryCatch({
+    run$wait_for_completion(show_output)
+  },
+  error = function(e) {
+    if (show_output && grepl("UnicodeEncode", e$message)) {
+      invisible(wait_until_run_completes(run))
+    } else {
+      stop(e$message, call. = FALSE)
+    }
+  })
+}
+
+wait_until_run_completes <- function(run) {
+  # print dots if we get here due to unicode error on windows rstudio console
+  # terminals
+  while (run$get_status() %in% azureml$core$run$RUNNING_STATES) {
+    cat(".")
+    Sys.sleep(1)
+  }
+}
+
+#' Get the context object for the current run
+#' @param allow_offline Allow the service context to fall back to offline mode
+#' so that the training script can be tested locally without submitting a job
+#' with the SDK.
+#' @return The run object.
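+#' @section Examples:
+#' A sketch of typical use inside a training script submitted with
+#' `submit_experiment()` (with `allow_offline = TRUE`, the same script can
+#' also be tested locally):
+#' ```
+#' run <- get_current_run()
+#' log_metric_to_run("accuracy", 0.95, run = run)
+#' ```
+#' @md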
+#' @export
+get_current_run <- function(allow_offline = TRUE) {
+  azureml$core$run$Run$get_context(allow_offline)
+}
+
+#' Cancel a run
+#' @param run run to be cancelled
+#' @return TRUE if cancellation was successful, else FALSE
+#' @export
+cancel_run <- function(run) {
+  run$cancel()
+}
+
+#' Get the Run object for a given run id
+#' @param experiment The containing experiment.
+#' @param run_id The run id for the run.
+#' @return The run object.
+#' @export
+get_run <- function(experiment, run_id) {
+  run <- azureml$core$run$Run(experiment, run_id)
+  invisible(run)
+}
+
+#' Download an associated file from storage.
+#' @param run the run object
+#' @param name The name of the artifact to be downloaded
+#' @param output_file_path The local path where to store the artifact
+#' @export
+download_file_from_run <- function(run, name, output_file_path = NULL) {
+  run$download_file(name, output_file_path)
+  invisible(NULL)
+}
+
+#' Download files from a given storage prefix (folder name) or
+#' the entire container if prefix is unspecified.
+#' @param run the run object
+#' @param prefix the filepath prefix within the container from
+#' which to download all artifacts
+#' @param output_directory optional directory that all artifact paths use
+#' as a prefix
+#' @param output_paths optional filepaths in which to store the downloaded
+#' artifacts. Should be unique and match length of paths.
+#' @param batch_size number of files to download per batch
+#' @export
+download_files_from_run <- function(run, prefix = NULL, output_directory = NULL,
+                                    output_paths = NULL, batch_size = 100L) {
+  run$download_files(prefix = prefix,
+                     output_directory = output_directory,
+                     output_paths = output_paths,
+                     batch_size = batch_size)
+  invisible(NULL)
+}
+
+#' Get the definition, status information, current log files and other details
+#' of the run.
+#' @param run the run object
+#' @return Return the details for the run
+#' @export
+get_run_details <- function(run) {
+  run$get_details()
+}
+
+#' Return run status including log file content.
+#' @param run the run object
+#' @return Returns the status for the run with log file contents
+#' @export
+get_run_details_with_logs <- function(run) {
+  run$get_details_with_logs()
+}
+
+#' List the files that are stored in association with the run.
+#' @param run the run object
+#' @return The list of paths for existing artifacts
+#' @export
+get_run_file_names <- function(run) {
+  run$get_file_names()
+}
+
+#' Get the secret values for a given list of secret names.
+#' Get a named list of found and not found secrets for the list of names
+#' provided.
+#' @param run the run object
+#' @param secrets List of secret names to retrieve the values for
+#' @return Returns a named list of found and not found secrets
+#' @export
+get_secrets_from_run <- function(run, secrets) {
+  run$get_secrets(secrets)
+}
+
+#' Log a metric to a run
+#' @param name name of the metric
+#' @param value value of the metric
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_metric_to_run <- function(name, value, run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log(name, value)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log an accuracy table to the artifact store.
+#' @param name The name of the accuracy table
+#' @param value json containing name, version, and data properties
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_accuracy_table_to_run <- function(name, value, description = "",
+                                      run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_accuracy_table(name, value, description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log a confusion matrix to the artifact store.
+#' @param name The name of the confusion matrix
+#' @param value json containing name, version, and data properties
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_confusion_matrix_to_run <- function(name, value, description = "",
+                                        run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_confusion_matrix(name, value, description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log an image metric to the run record.
+#' @param name The name of the metric
+#' @param path The path or stream of the image
+#' @param plot The plot to log as an image
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_image_to_run <- function(name, path = NULL, plot = NULL,
+                             description = "", run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_image(name, path = path, plot = plot, description = description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log a list metric value to the run with the given name.
+#' @param name The name of the metric
+#' @param value The value of the metric
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_list_to_run <- function(name, value, description = "", run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_list(name, value, description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log predictions to the artifact store.
+#' @param name The name of the predictions
+#' @param value json containing name, version, and data properties
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_predictions_to_run <- function(name, value, description = "", run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_predictions(name, value, description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log residuals to the artifact store.
+#' @param name The name of the residuals
+#' @param value json containing name, version, and data properties
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_residuals_to_run <- function(name, value, description = "", run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_residuals(name, value, description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log a row metric to the run with the given name.
+#' @param name The name of the metric
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @param ... Each named parameter generates a column with the value specified.
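+#' @section Examples:
+#' A brief sketch logging one row per call (the metric name and column
+#' values here are illustrative):
+#' ```
+#' log_row_to_run("Loss over epochs", epoch = 1L, loss = 0.95)
+#' ```
+#' @md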
+#' @export
+log_row_to_run <- function(name, description = "", run = NULL, ...) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_row(name, description = description, ...)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Log a table metric to the run with the given name.
+#' @param name The name of the metric
+#' @param value The table value of the metric (a named list where the element
+#' names are the columns to be posted to the service)
+#' @param description An optional metric description
+#' @param run Run object. If not specified, will default to current run from
+#' service context.
+#' @export
+log_table_to_run <- function(name, value, description = "", run = NULL) {
+  if (is.null(run)) {
+    run <- get_current_run()
+  }
+  run$log_table(name, value, description)
+  run$flush()
+  invisible(NULL)
+}
+
+#' Plot a table of run details in the Viewer
+#' @param run run used for plotting
+#' @export
+view_run_details <- function(run) {
+  status <- run$get_status()
+  details <- run$get_details()
+  # build an HTML link to the run details page in the Azure portal
+  web_view_link <- paste0('<a href="', run$get_portal_url(),
+                          '" target="_blank">', "Link", "</a>")
+
+  if (status == "Completed" || status == "Failed") {
+    diff <- (parsedate::parse_iso_8601(details$endTimeUtc) -
+             parsedate::parse_iso_8601(details$startTimeUtc))
+    duration <- paste(as.numeric(diff), "mins")
+  } else {
+    duration <- "-"
+  }
+
+  df <- matrix(list("Run Id",
+                    "Status",
+                    "Start Time",
+                    "Duration",
+                    "Target",
+                    "Script Name",
+                    "Arguments",
+                    "Web View",
+                    run$id,
+                    status,
+                    format(parsedate::parse_iso_8601(details$startTimeUtc),
+                           format = "%B %d %Y %H:%M:%S"),
+                    duration,
+                    details$runDefinition$target,
+                    details$runDefinition$script,
+                    toString(details$runDefinition$arguments),
+                    web_view_link),
+               nrow = 8,
+               ncol = 2)
+
+  DT::datatable(df, escape = FALSE, rownames = FALSE, colnames = c(" ", " "),
+                caption = paste(unlist(details$warnings), collapse = "\r\n"),
+                options = list(dom = "t", scrollY = TRUE))
+}
diff --git a/R/webservice-aci.R b/R/webservice-aci.R
new file mode 100644
index 00000000..e531e67b
--- /dev/null
+++ b/R/webservice-aci.R
@@ -0,0 +1,105 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Create a configuration object for deploying an ACI Webservice.
+#' @param cpu_cores The number of cpu cores to allocate for this Webservice.
+#' Can be a decimal.
+#' @param memory_gb The amount of memory (in GB) to allocate for this
+#' Webservice. Can be a decimal. Defaults to 0.5
+#' @param tags Dictionary of key value tags to give this Webservice
+#' @param properties Dictionary of key value properties to give this
+#' Webservice. These properties cannot be changed after deployment, however
+#' new key value pairs can be added
+#' @param description A description to give this Webservice
+#' @param location The Azure region to deploy this Webservice to. If not
+#' specified the Workspace location will be used. More details on available
+#' regions can be found here:
+#' https://azure.microsoft.com/en-us/global-infrastructure/services/?regions=all&products=container-instances
+#' @param auth_enabled Whether or not to enable auth for this Webservice.
+#' Defaults to FALSE
+#' @param ssl_enabled Whether or not to enable SSL for this Webservice.
+#' Defaults to FALSE
+#' @param enable_app_insights Whether or not to enable AppInsights for this
+#' Webservice. Defaults to FALSE
+#' @param ssl_cert_pem_file The cert file needed if SSL is enabled
+#' @param ssl_key_pem_file The key file needed if SSL is enabled
+#' @param ssl_cname The CNAME if SSL is enabled
+#' @param dns_name_label The dns name label for the scoring endpoint.
+#' If not specified a unique dns name label will be generated for the scoring
+#' endpoint.
+#' @return AciServiceDeploymentConfiguration object to use when deploying a
+#' Webservice object
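+#' @section Examples:
+#' A minimal sketch (the resource values here are illustrative):
+#' ```
+#' deployment_config <- aci_webservice_deployment_config(cpu_cores = 1,
+#'                                                       memory_gb = 1)
+#' ```
+#' @md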
diff --git a/R/webservice-aci.R b/R/webservice-aci.R
new file mode 100644
index 00000000..e531e67b
--- /dev/null
+++ b/R/webservice-aci.R
@@ -0,0 +1,105 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Create a configuration object for deploying an ACI Webservice.
+#' @param cpu_cores The number of CPU cores to allocate for this Webservice.
+#' Can be a decimal.
+#' @param memory_gb The amount of memory (in GB) to allocate for this
+#' Webservice. Can be a decimal. Defaults to 0.5.
+#' @param tags Dictionary of key value tags to give this Webservice.
+#' @param properties Dictionary of key value properties to give this
+#' Webservice. These properties cannot be changed after deployment, however
+#' new key value pairs can be added.
+#' @param description A description to give this Webservice.
+#' @param location The Azure region to deploy this Webservice to. If not
+#' specified, the Workspace location will be used. More details on available
+#' regions can be found here:
+#' https://azure.microsoft.com/en-us/global-infrastructure/services/?regions=all&products=container-instances
+#' @param auth_enabled Whether or not to enable auth for this Webservice.
+#' Defaults to FALSE.
+#' @param ssl_enabled Whether or not to enable SSL for this Webservice.
+#' Defaults to FALSE.
+#' @param enable_app_insights Whether or not to enable AppInsights for this
+#' Webservice. Defaults to FALSE.
+#' @param ssl_cert_pem_file The cert file needed if SSL is enabled.
+#' @param ssl_key_pem_file The key file needed if SSL is enabled.
+#' @param ssl_cname The CNAME to use if SSL is enabled.
+#' @param dns_name_label The DNS name label for the scoring endpoint.
+#' If not specified, a unique DNS name label will be generated for the scoring
+#' endpoint.
+#' @return AciServiceDeploymentConfiguration object to use when deploying a
+#' Webservice object.
+#' @export
+aci_webservice_deployment_config <- function(cpu_cores = NULL,
+                                             memory_gb = NULL,
+                                             tags = NULL,
+                                             properties = NULL,
+                                             description = NULL,
+                                             location = NULL,
+                                             auth_enabled = NULL,
+                                             ssl_enabled = NULL,
+                                             enable_app_insights = NULL,
+                                             ssl_cert_pem_file = NULL,
+                                             ssl_key_pem_file = NULL,
+                                             ssl_cname = NULL,
+                                             dns_name_label = NULL) {
+  azureml$core$webservice$AciWebservice$deploy_configuration(
+    cpu_cores,
+    memory_gb,
+    tags,
+    properties,
+    description,
+    location,
+    auth_enabled,
+    ssl_enabled,
+    enable_app_insights,
+    ssl_cert_pem_file,
+    ssl_key_pem_file,
+    ssl_cname,
+    dns_name_label)
+}
+
+#' Update the Webservice with the provided properties.
+#' Values left as `NULL` will remain unchanged in this Webservice.
+#' @param webservice AciWebservice object.
+#' @param tags Dictionary of key value tags to give this Webservice.
+#' Will replace existing tags.
+#' @param properties Dictionary of key value properties to add to the existing
+#' properties dictionary.
+#' @param description A description to give this Webservice.
+#' @param auth_enabled Enable or disable auth for this Webservice.
+#' @param ssl_enabled Whether or not to enable SSL for this Webservice.
+#' @param ssl_cert_pem_file The cert file needed if SSL is enabled.
+#' @param ssl_key_pem_file The key file needed if SSL is enabled.
+#' @param ssl_cname The CNAME to use if SSL is enabled.
+#' @param enable_app_insights Whether or not to enable AppInsights for this
+#' Webservice.
+#' @param models A list of Model objects to package into the updated service.
+#' @param inference_config An InferenceConfig object used to provide the
+#' required model deployment properties.
+#' @export
+update_aci_webservice <- function(webservice,
+                                  tags = NULL,
+                                  properties = NULL,
+                                  description = NULL,
+                                  auth_enabled = NULL,
+                                  ssl_enabled = NULL,
+                                  ssl_cert_pem_file = NULL,
+                                  ssl_key_pem_file = NULL,
+                                  ssl_cname = NULL,
+                                  enable_app_insights = NULL,
+                                  models = NULL,
+                                  inference_config = NULL) {
+  webservice$update(tags = tags,
+                    properties = properties,
+                    description = description,
+                    auth_enabled = auth_enabled,
+                    ssl_enabled = ssl_enabled,
+                    ssl_cert_pem_file = ssl_cert_pem_file,
+                    ssl_key_pem_file = ssl_key_pem_file,
+                    ssl_cname = ssl_cname,
+                    enable_app_insights = enable_app_insights,
+                    models = models,
+                    inference_config = inference_config)
+  invisible(NULL)
+}
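+
+# A hedged deployment sketch (service name and variables are illustrative;
+# `ws`, `model`, and `inference_config` are assumed to exist already):
+#
+#   deployment_config <- aci_webservice_deployment_config(cpu_cores = 1,
+#                                                         memory_gb = 1)
+#   service <- deploy_model(ws, "my-aci-service", models = list(model),
+#                           inference_config = inference_config,
+#                           deployment_config = deployment_config)
+#   wait_for_deployment(service, show_output = TRUE)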
diff --git a/R/webservice-aks.R b/R/webservice-aks.R
new file mode 100644
index 00000000..16b2cc3f
--- /dev/null
+++ b/R/webservice-aks.R
@@ -0,0 +1,229 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Create a configuration object for deploying to an AKS compute target.
+#' @param autoscale_enabled Whether or not to enable autoscaling for this
+#' Webservice. Defaults to TRUE if num_replicas is NULL.
+#' @param autoscale_min_replicas The minimum number of containers to use when
+#' autoscaling this Webservice. Defaults to 1.
+#' @param autoscale_max_replicas The maximum number of containers to use when
+#' autoscaling this Webservice. Defaults to 10.
+#' @param autoscale_refresh_seconds How often the autoscaler should attempt to
+#' scale this Webservice. Defaults to 1.
+#' @param autoscale_target_utilization The target utilization (in percent out
+#' of 100) the autoscaler should attempt to maintain for this Webservice.
+#' Defaults to 70.
+#' @param auth_enabled Whether or not to enable key auth for this Webservice.
+#' Defaults to TRUE.
+#' @param cpu_cores The number of CPU cores to allocate for this Webservice.
+#' Can be a decimal. Defaults to 0.1.
+#' @param memory_gb The amount of memory (in GB) to allocate for this
+#' Webservice. Can be a decimal. Defaults to 0.5.
+#' @param enable_app_insights Whether or not to enable Application Insights
+#' logging for this Webservice. Defaults to FALSE.
+#' @param scoring_timeout_ms A timeout to enforce for scoring calls to this
+#' Webservice. Defaults to 60000.
+#' @param replica_max_concurrent_requests The maximum number of concurrent
+#' requests per node to allow for this Webservice. Defaults to 1.
+#' @param max_request_wait_time The maximum amount of time a request will stay
+#' in the queue (in milliseconds) before returning a 503 error. Defaults to
+#' 500.
+#' @param num_replicas The number of containers to allocate for this
+#' Webservice. No default; if this parameter is not set, the autoscaler is
+#' enabled by default.
+#' @param primary_key A primary auth key to use for this Webservice.
+#' @param secondary_key A secondary auth key to use for this Webservice.
+#' @param tags Dictionary of key value tags to give this Webservice.
+#' @param properties Dictionary of key value properties to give this
+#' Webservice. These properties cannot be changed after deployment, however new
+#' key value pairs can be added.
+#' @param description A description to give this Webservice.
+#' @param gpu_cores The number of GPU cores to allocate for this Webservice.
+#' Defaults to 1.
+#' @param period_seconds How often (in seconds) to perform the liveness probe.
+#' Defaults to 10 seconds. Minimum value is 1.
+#' @param initial_delay_seconds Number of seconds after the container has
+#' started before liveness probes are initiated. Defaults to 310.
+#' @param timeout_seconds Number of seconds after which the liveness probe
+#' times out. Defaults to 2 seconds. Minimum value is 1.
+#' @param success_threshold Minimum consecutive successes for the liveness
+#' probe to be considered successful after having failed. Defaults to 1.
+#' Minimum value is 1.
+#' @param failure_threshold When a Pod starts and the liveness probe fails,
+#' Kubernetes will try failureThreshold times before giving up. Defaults to 3.
+#' Minimum value is 1.
+#' @param namespace The Kubernetes namespace in which to deploy this
+#' Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen
+#' ('-') characters. The first and last characters cannot be hyphens.
+#' @param token_auth_enabled Whether or not to enable token auth for this
+#' Webservice. If this is enabled, users can access this Webservice by fetching
+#' an access token using their Azure Active Directory credentials.
+#' Defaults to FALSE.
+#' @return AksServiceDeploymentConfiguration object
+#' @export
+aks_webservice_deployment_config <- function(
+    autoscale_enabled = NULL,
+    autoscale_min_replicas = NULL,
+    autoscale_max_replicas = NULL,
+    autoscale_refresh_seconds = NULL,
+    autoscale_target_utilization = NULL,
+    auth_enabled = NULL,
+    cpu_cores = NULL,
+    memory_gb = NULL,
+    enable_app_insights = NULL,
+    scoring_timeout_ms = NULL,
+    replica_max_concurrent_requests = NULL,
+    max_request_wait_time = NULL,
+    num_replicas = NULL,
+    primary_key = NULL,
+    secondary_key = NULL,
+    tags = NULL,
+    properties = NULL,
+    description = NULL,
+    gpu_cores = NULL,
+    period_seconds = NULL,
+    initial_delay_seconds = NULL,
+    timeout_seconds = NULL,
+    success_threshold = NULL,
+    failure_threshold = NULL,
+    namespace = NULL,
+    token_auth_enabled = NULL) {
+  config <- azureml$core$webservice$AksWebservice$deploy_configuration(
+    autoscale_enabled = autoscale_enabled,
+    autoscale_min_replicas = autoscale_min_replicas,
+    autoscale_max_replicas = autoscale_max_replicas,
+    autoscale_refresh_seconds = autoscale_refresh_seconds,
+    autoscale_target_utilization = autoscale_target_utilization,
+    auth_enabled = auth_enabled,
+    cpu_cores = cpu_cores,
+    memory_gb = memory_gb,
+    enable_app_insights = enable_app_insights,
+    scoring_timeout_ms = scoring_timeout_ms,
+    replica_max_concurrent_requests = replica_max_concurrent_requests,
+    max_request_wait_time = max_request_wait_time,
+    num_replicas = num_replicas,
+    primary_key = primary_key,
+    secondary_key = secondary_key,
+    tags = tags,
+    properties = properties,
+    description = description,
+    gpu_cores = gpu_cores,
+    period_seconds = period_seconds,
+    initial_delay_seconds = initial_delay_seconds,
+    timeout_seconds = timeout_seconds,
+    success_threshold = success_threshold,
+    failure_threshold = failure_threshold,
+    namespace = namespace,
+    token_auth_enabled = token_auth_enabled)
+  invisible(config)
+}
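+
+# A hedged sketch of an autoscaling AKS configuration (values illustrative):
+#
+#   deployment_config <- aks_webservice_deployment_config(
+#     autoscale_enabled = TRUE,
+#     autoscale_min_replicas = 1,
+#     autoscale_max_replicas = 4,
+#     cpu_cores = 1,
+#     memory_gb = 1)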
+
+
+#' Update the Webservice with the provided properties.
+#' Values left as `NULL` will remain unchanged in this Webservice.
+#' @param webservice AksWebservice object.
+#' @param autoscale_enabled Enable or disable autoscaling of this Webservice.
+#' @param autoscale_min_replicas The minimum number of containers to use when
+#' autoscaling this Webservice.
+#' @param autoscale_max_replicas The maximum number of containers to use when
+#' autoscaling this Webservice.
+#' @param autoscale_refresh_seconds How often the autoscaler should attempt to
+#' scale this Webservice.
+#' @param autoscale_target_utilization The target utilization (in percent out
+#' of 100) the autoscaler should attempt to maintain for this Webservice.
+#' @param auth_enabled Whether or not to enable auth for this Webservice.
+#' @param cpu_cores The number of CPU cores to allocate for this Webservice.
+#' Can be a decimal.
+#' @param memory_gb The amount of memory (in GB) to allocate for this
+#' Webservice. Can be a decimal.
+#' @param enable_app_insights Whether or not to enable Application Insights
+#' logging for this Webservice.
+#' @param scoring_timeout_ms A timeout to enforce for scoring calls to this
+#' Webservice.
+#' @param replica_max_concurrent_requests The maximum number of concurrent
+#' requests per node to allow for this Webservice.
+#' @param max_request_wait_time The maximum amount of time a request will stay
+#' in the queue (in milliseconds) before returning a 503 error.
+#' @param num_replicas The number of containers to allocate for this
+#' Webservice.
+#' @param tags Dictionary of key value tags to give this Webservice. Will
+#' replace existing tags.
+#' @param properties Dictionary of key value properties to add to the existing
+#' properties dictionary.
+#' @param description A description to give this Webservice.
+#' @param models A list of Model objects to package with the updated service.
+#' @param inference_config An InferenceConfig object used to provide the
+#' required model deployment properties.
+#' @param gpu_cores The number of GPU cores to allocate for this Webservice.
+#' @param period_seconds How often (in seconds) to perform the liveness probe.
+#' Defaults to 10 seconds. Minimum value is 1.
+#' @param initial_delay_seconds Number of seconds after the container has
+#' started before liveness probes are initiated.
+#' @param timeout_seconds Number of seconds after which the liveness probe
+#' times out. Defaults to 1 second. Minimum value is 1.
+#' @param success_threshold Minimum consecutive successes for the liveness
+#' probe to be considered successful after having failed. Defaults to 1.
+#' Minimum value is 1.
+#' @param failure_threshold When a Pod starts and the liveness probe fails,
+#' Kubernetes will try failureThreshold times before giving up. Defaults to 3.
+#' Minimum value is 1.
+#' @param namespace The Kubernetes namespace in which to deploy this
+#' Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen
+#' ('-') characters. The first and last characters cannot be hyphens.
+#' @param token_auth_enabled Whether or not to enable token auth for this
+#' Webservice. If this is enabled, users can access this Webservice by fetching
+#' an access token using their Azure Active Directory credentials.
+#' Defaults to FALSE.
+#' @export
+update_aks_webservice <- function(webservice, autoscale_enabled = NULL,
+                                  autoscale_min_replicas = NULL,
+                                  autoscale_max_replicas = NULL,
+                                  autoscale_refresh_seconds = NULL,
+                                  autoscale_target_utilization = NULL,
+                                  auth_enabled = NULL,
+                                  cpu_cores = NULL,
+                                  memory_gb = NULL,
+                                  enable_app_insights = NULL,
+                                  scoring_timeout_ms = NULL,
+                                  replica_max_concurrent_requests = NULL,
+                                  max_request_wait_time = NULL,
+                                  num_replicas = NULL,
+                                  tags = NULL,
+                                  properties = NULL,
+                                  description = NULL,
+                                  models = NULL,
+                                  inference_config = NULL,
+                                  gpu_cores = NULL,
+                                  period_seconds = NULL,
+                                  initial_delay_seconds = NULL,
+                                  timeout_seconds = NULL,
+                                  success_threshold = NULL,
+                                  failure_threshold = NULL,
+                                  namespace = NULL,
+                                  token_auth_enabled = NULL) {
+  webservice$update(autoscale_enabled = autoscale_enabled,
+                    autoscale_min_replicas = autoscale_min_replicas,
+                    autoscale_max_replicas = autoscale_max_replicas,
+                    autoscale_refresh_seconds = autoscale_refresh_seconds,
+                    autoscale_target_utilization = autoscale_target_utilization,
+                    auth_enabled = auth_enabled,
+                    cpu_cores = cpu_cores,
+                    memory_gb = memory_gb,
+                    enable_app_insights = enable_app_insights,
+                    scoring_timeout_ms = scoring_timeout_ms,
+                    replica_max_concurrent_requests = replica_max_concurrent_requests,
+                    max_request_wait_time = max_request_wait_time,
+                    num_replicas = num_replicas,
+                    tags = tags,
+                    properties = properties,
+                    description = description,
+                    models = models,
+                    inference_config = inference_config,
+                    gpu_cores = gpu_cores,
+                    period_seconds = period_seconds,
+                    initial_delay_seconds = initial_delay_seconds,
+                    timeout_seconds = timeout_seconds,
+                    success_threshold = success_threshold,
+                    failure_threshold = failure_threshold,
+                    namespace = namespace,
+                    token_auth_enabled = token_auth_enabled)
+  invisible(NULL)
+}
diff --git a/R/webservice-local.R b/R/webservice-local.R
new file mode 100644
index 00000000..70b14f4c
--- /dev/null
+++ b/R/webservice-local.R
@@ -0,0 +1,61 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Create a configuration object for deploying a local Webservice.
+#' @param port The local port on which to expose the service's HTTP endpoint.
+#' @return LocalWebserviceDeploymentConfiguration object to use when deploying
+#' a Webservice object.
+#' @export
+local_webservice_deployment_config <- function(port = NULL) {
+  config <- azureml$core$webservice$LocalWebservice$deploy_configuration(port)
+  invisible(config)
+}
+
+#' Update the LocalWebservice with the provided properties.
+#' Values left as `NULL` will remain unchanged in this LocalWebservice.
+#' @param webservice LocalWebservice object.
+#' @param models A new list of models contained in the LocalWebservice.
+#' @param deployment_config Deployment configuration options to apply to the
+#' LocalWebservice.
+#' @param wait Wait for the service's container to reach a healthy state.
+#' @param inference_config An InferenceConfig object used to provide the
+#' required model deployment properties.
+#' @export
+update_local_webservice <- function(webservice, models = NULL,
+                                    deployment_config = NULL,
+                                    wait = FALSE,
+                                    inference_config = NULL) {
+  webservice$update(models = models,
+                    deployment_config = deployment_config,
+                    wait = wait,
+                    inference_config = inference_config)
+  invisible(NULL)
+}
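+
+# Hypothetical local-deployment sketch (the port number is illustrative;
+# `service` is an existing LocalWebservice object):
+#
+#   deployment_config <- local_webservice_deployment_config(port = 8890)
+#   update_local_webservice(service, deployment_config = deployment_config,
+#                           wait = TRUE)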
+
+#' Delete this LocalWebservice from the local machine.
+#' This function call is not asynchronous; it runs until the service is
+#' deleted.
+#' @param webservice LocalWebservice object.
+#' @param delete_cache Delete the temporary files cached for the service.
+#' @param delete_image Delete the service's Docker image.
+#' @export
+delete_local_webservice <- function(webservice,
+                                    delete_cache = TRUE,
+                                    delete_image = FALSE) {
+  webservice$delete(delete_cache = delete_cache,
+                    delete_image = delete_image)
+  invisible(NULL)
+}
+
+#' Reload the LocalWebservice's execution script and dependencies.
+#' This restarts the service's container with copies of updated assets,
+#' including the execution script and local dependencies, but it does not
+#' rebuild the underlying image. Accordingly, changes to Conda/pip dependencies
+#' or custom Docker steps will not be reflected in the reloaded
+#' LocalWebservice. To handle those changes, call `update_local_webservice()`
+#' instead.
+#' @param webservice LocalWebservice object.
+#' @param wait Wait for the service's container to reach a healthy state.
+#' @export
+reload_local_webservice_assets <- function(webservice, wait = FALSE) {
+  webservice$reload(wait)
+  invisible(NULL)
+}
diff --git a/R/webservice.R b/R/webservice.R
new file mode 100644
index 00000000..092427f3
--- /dev/null
+++ b/R/webservice.R
@@ -0,0 +1,94 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Retrieve a cloud representation of a Webservice object associated with the
+#' provided workspace. Will return an instance of a child class corresponding
+#' to the specific type of the retrieved Webservice object.
+#' @param workspace The workspace object containing the Webservice object to
+#' retrieve.
+#' @param name The name of the Webservice object to retrieve.
+#' @return The webservice object.
+#' @export
+get_webservice <- function(workspace, name) {
+  webservice <- azureml$core$Webservice(workspace, name)
+  invisible(webservice)
+}
+
+#' Automatically poll on the running Webservice deployment.
+#' @param webservice The webservice object.
+#' @param show_output Option to print more verbose output.
+#' @export
+wait_for_deployment <- function(webservice, show_output = FALSE) {
+  webservice$wait_for_deployment(show_output)
+}
+
+#' Retrieve the logs for the Webservice.
+#' @param webservice The webservice object.
+#' @param num_lines The maximum number of log lines to retrieve.
+#' @return The logs for this Webservice.
+#' @export
+get_webservice_logs <- function(webservice, num_lines = 5000L) {
+  webservice$get_logs(num_lines)
+}
+
+#' Retrieve the auth keys for this Webservice.
+#' @param webservice The webservice object.
+#' @return The auth keys for this Webservice.
+#' @export
+get_webservice_keys <- function(webservice) {
+  webservice$get_keys()
+}
+
+#' Delete this Webservice from its associated workspace.
+#' @param webservice The webservice object.
+#' @export
+delete_webservice <- function(webservice) {
+  webservice$delete()
+  invisible(NULL)
+}
+
+#' Call this Webservice with the provided input.
+#' @param webservice The webservice object.
+#' @param input_data The input data to call the Webservice with. This is the
+#' data your machine learning model expects as an input to run predictions.
+#' @return The result of calling the Webservice. This will return predictions
+#' run from your machine learning model.
+#' @export
+invoke_webservice <- function(webservice, input_data) {
+  webservice$run(input_data)
+}
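+
+# A hedged scoring sketch; the expected payload shape depends entirely on the
+# scoring script of the deployed model:
+#
+#   input_data <- jsonlite::toJSON(list(data = list(c(1.0, 2.0, 3.0))))
+#   result <- invoke_webservice(service, input_data)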
+
+#' Regenerate one of the Webservice's keys. Must specify either 'Primary' or
+#' 'Secondary' key.
+#' @param webservice The webservice object.
+#' @param key_type Which key to regenerate. Options are 'Primary' or
+#' 'Secondary'.
+#' @export
+generate_new_webservice_key <- function(webservice, key_type) {
+  webservice$regen_key(key_type)
+  invisible(NULL)
+}
+
+#' Retrieve the auth token for this Webservice, scoped to the current user.
+#' @param webservice The webservice object.
+#' @return The auth token for this Webservice and when it should be
+#' refreshed.
+#' @export
+get_webservice_token <- function(webservice) {
+  webservice$get_token()
+}
+
+#' Convert this Webservice into a JSON-serialized dictionary.
+#' @param webservice The webservice object.
+#' @return The JSON representation of this Webservice.
+serialize_webservice <- function(webservice) {
+  webservice$serialize()
+}
+
+#' Convert a JSON object into a Webservice object.
+#' @param workspace The workspace object the Webservice is registered under.
+#' @param webservice_payload A JSON object to convert to a Webservice object.
+#' @return The Webservice representation of the provided JSON object.
+deserialize_to_webservice <- function(workspace, webservice_payload) {
+  azureml$core$Webservice$deserialize(workspace, webservice_payload)
+}
diff --git a/R/workspace.R b/R/workspace.R
new file mode 100644
index 00000000..a66846d6
--- /dev/null
+++ b/R/workspace.R
@@ -0,0 +1,311 @@
+# Copyright(c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+#' Create a new Azure Machine Learning workspace
+#'
+#' @description
+#' Create a new Azure Machine Learning workspace. Throws an exception if the
+#' workspace already exists or any of the workspace requirements are not
+#' satisfied. When you create a new workspace, it automatically creates several
+#' Azure resources that are used in the workspace:
+#'
+#' * Azure Container Registry: Registers Docker containers that you use during
+#' training and when you deploy a model. To minimize costs, ACR is
+#' lazy-loaded until deployment images are created.
+#' * Azure Storage account: Used as the default datastore for the workspace.
+#' * Azure Application Insights: Stores monitoring information about your
+#' models.
+#' * Azure Key Vault: Stores secrets that are used by compute targets and other
+#' sensitive information that's needed by the workspace.
+#' @param name A string of the new workspace name. The workspace name must be
+#' between 2 and 32 characters long and contain only letters and numbers.
+#' @param subscription_id A string of the subscription ID of the containing
+#' subscription for the new workspace. The parameter is required if the user
+#' has access to more than one subscription.
+#' @param resource_group A string of the Azure resource group containing
+#' the workspace. The parameter defaults to a mutation of the workspace name.
+#' @param location A string of the location of the workspace. The parameter
+#' defaults to the resource group location. The location has to be a supported
+#' region for Azure Machine Learning Services.
+#' @param create_resource_group If `TRUE` the resource group will be created
+#' if it doesn't exist.
+#' @param friendly_name A string of the friendly name for the workspace that
+#' can be displayed in the UI.
+#' @param storage_account A string of an existing storage account in the Azure
+#' resource ID format. The storage will be used by the workspace to save run
+#' outputs, code, logs, etc. If `NULL` a new storage account will be created.
+#' @param key_vault A string of an existing key vault in the Azure resource ID
+#' format. The key vault will be used by the workspace to store credentials
+#' added to the workspace by the users.
+#' If `NULL` a new key vault will be created.
+#' @param app_insights A string of an existing Application Insights in the
+#' Azure resource ID format. The Application Insights will be used by the
+#' workspace to log webservices events. If `NULL` a new Application Insights
+#' will be created.
+#' @param container_registry A string of an existing container registry in the
+#' Azure resource ID format. The container registry will be used by the
+#' workspace to pull and push both experimentation and webservices images. If
+#' `NULL` a new container registry will be created.
+#' @param exist_ok If `TRUE` the method will not fail if the workspace already
+#' exists.
+#' @param show_output If `TRUE` the method will print out incremental progress
+#' of the method.
+#' @return The `Workspace` object.
+#' @export
+#' @section Usage:
+#' The first example requires only minimal specification, and all dependent
+#' resources as well as the resource group will be created automatically.
+#' ```
+#' ws <- create_workspace(name = 'myworkspace',
+#'                        subscription_id = '<subscription-id>',
+#'                        resource_group = 'myresourcegroup',
+#'                        location = 'eastus2')
+#' ```
+#'
+#' The following example shows how to reuse existing Azure resources by making
+#' use of all parameters utilizing the Azure resource ID format. The specific
+#' Azure resource IDs can be retrieved through the Azure Portal or SDK. This
+#' assumes that the resource group, storage account, key vault, App Insights
+#' and container registry already exist.
+#' ```
+#' ws <- create_workspace(
+#'   name = 'myworkspace',
+#'   subscription_id = '<subscription-id>',
+#'   resource_group = 'myresourcegroup',
+#'   create_resource_group = FALSE,
+#'   location = 'eastus2',
+#'   friendly_name = 'My workspace',
+#'   storage_account = 'subscriptions/<subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.storage/storageaccounts/mystorageaccount',
+#'   key_vault = 'subscriptions/<subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.keyvault/vaults/mykeyvault',
+#'   app_insights = 'subscriptions/<subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.insights/components/myappinsights',
+#'   container_registry = 'subscriptions/<subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.containerregistry/registries/mycontainerregistry')
+#' ```
+#' @md
+create_workspace <- function(
+    name,
+    subscription_id = NULL,
+    resource_group = NULL,
+    location = NULL,
+    create_resource_group = TRUE,
+    friendly_name = NULL,
+    storage_account = NULL,
+    key_vault = NULL,
+    app_insights = NULL,
+    container_registry = NULL,
+    exist_ok = FALSE,
+    show_output = TRUE) {
+  ws <-
+    azureml$core$Workspace$create(name = name,
+                                  subscription_id = subscription_id,
+                                  resource_group = resource_group,
+                                  location = location,
+                                  create_resource_group = create_resource_group,
+                                  friendly_name = friendly_name,
+                                  storage_account = storage_account,
+                                  key_vault = key_vault,
+                                  app_insights = app_insights,
+                                  container_registry = container_registry,
+                                  exist_ok = exist_ok,
+                                  show_output = show_output)
+  invisible(ws)
+}
+
+#' Get an existing workspace
+#'
+#' @description
+#' Returns a `Workspace` object for an existing Azure Machine Learning
+#' workspace. Throws an exception if the workspace doesn't exist or the
+#' required fields don't lead to a uniquely identifiable workspace.
+#' @param name A string of the workspace name to get.
+#' @param subscription_id A string of the subscription ID to use. The parameter
+#' is required if the user has access to more than one subscription.
+#' @param resource_group A string of the resource group to use. If `NULL` the
+#' method will search all resource groups in the subscription.
+#' @return The `Workspace` object.
+#' @export
+#' @md
+get_workspace <- function(name, subscription_id = NULL, resource_group = NULL) {
+  tryCatch({
+    azureml$core$Workspace$get(name, auth = NULL,
+                               subscription_id = subscription_id,
+                               resource_group = resource_group)
+  },
+  error = function(e) {
+    if (grepl("No workspaces found with name=", e$message)) {
+      NULL
+    } else {
+      stop(e)
+    }
+  })
+}
+
+#' Load workspace configuration details from a config file
+#'
+#' @description
+#' Returns a `Workspace` object for an existing Azure Machine Learning
+#' workspace by reading the workspace configuration from a file. The method
+#' provides a simple way of reusing the same workspace across multiple files or
+#' projects. Users can save the workspace ARM properties using
+#' `write_workspace_config()`, and use this method to load the same workspace
+#' in different files or projects without retyping the workspace ARM
+#' properties.
+#' @param path A string of the path to the config file or starting directory
+#' for the search. The parameter defaults to starting the search in the current
+#' directory.
+#' @return The `Workspace` object.
+#' @export
+#' @md
+load_workspace_from_config <- function(path = NULL) {
+  azureml$core$workspace$Workspace$from_config(path)
+}
+
+#' Delete a workspace
+#'
+#' @description
+#' Delete the Azure Machine Learning workspace resource. `delete_workspace()`
+#' can also delete the workspace's associated resources.
+#' @param workspace The `Workspace` object of the workspace to delete.
+#' @param delete_dependent_resources If `TRUE` the workspace's associated
+#' resources, i.e. ACR, storage account, key vault, and application insights
+#' will also be deleted.
+#' @param no_wait If `TRUE` do not wait for the workspace deletion to complete.
+#' @export
+#' @md
+delete_workspace <- function(workspace,
+                             delete_dependent_resources = FALSE,
+                             no_wait = FALSE) {
+  workspace$delete(delete_dependent_resources, no_wait)
+  invisible(NULL)
+}
+
+#' List all workspaces that the user has access to in a subscription ID
+#'
+#' @description
+#' List all workspaces that the user has access to in the specified
+#' `subscription_id` parameter. The list of workspaces can be filtered
+#' based on the resource group.
+#' @param subscription_id A string of the specified subscription ID to
+#' list the workspaces in.
+#' @param resource_group A string of the specified resource group in which to
+#' list the workspaces. If `NULL` the method will list all the workspaces
+#' within the specified subscription.
+#' @return A named list of `Workspace` objects where the element name
+#' corresponds to the workspace name.
+#' @export
+#' @md
+list_workspaces <- function(subscription_id, resource_group = NULL) {
+  azureml$core$workspace$Workspace$list(subscription_id, resource_group)
+}
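+
+# A common get-or-create pattern (names and IDs are illustrative), relying on
+# get_workspace() returning NULL when the workspace is not found:
+#
+#   ws <- get_workspace("myworkspace", "<subscription-id>", "myresourcegroup")
+#   if (is.null(ws)) {
+#     ws <- create_workspace(name = "myworkspace",
+#                            subscription_id = "<subscription-id>",
+#                            resource_group = "myresourcegroup",
+#                            location = "eastus2")
+#   }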
+
+#' Write out the workspace configuration details to a config file
+#'
+#' @description
+#' Write out the workspace ARM properties to a config file. Workspace ARM
+#' properties can be loaded later using `load_workspace_from_config()`.
+#' The method provides a simple way of reusing the same workspace across
+#' multiple files or projects. Users can save the workspace ARM properties
+#' using this function, and use `load_workspace_from_config()` to load the
+#' same workspace in different files or projects without retyping the
+#' workspace ARM properties.
+#' @param workspace The `Workspace` object whose config has to be written out.
+#' @param path A string of the location in which to write the config.json
+#' file. The parameter defaults to the current working directory.
+#' @param file_name A string of the name to use for the config file. The
+#' parameter defaults to `'config.json'`.
+#' @export
+#' @md
+write_workspace_config <- function(workspace, path = NULL, file_name = NULL) {
+  workspace$write_config(path, file_name)
+  invisible(NULL)
+}
+
+#' Get the default datastore for a workspace
+#'
+#' @description
+#' Returns the default datastore associated with the workspace.
+#'
+#' When you create a workspace, an Azure blob container and an Azure file share
+#' are registered to the workspace with the names `workspaceblobstore` and
+#' `workspacefilestore`, respectively. They store the connection information
+#' of the blob container and the file share that is provisioned in the storage
+#' account attached to the workspace. The `workspaceblobstore` is set as the
+#' default datastore, and remains the default datastore unless you set a new
+#' datastore as the default with `set_default_datastore()`.
+#' @param workspace The `Workspace` object.
+#' @return The default `Datastore` object.
+#' @export
+#' @section Examples:
+#' Get the default datastore for the workspace:
+#' ```
+#' ws <- load_workspace_from_config()
+#' ds <- get_default_datastore(ws)
+#' ```
+#'
+#' If you have not changed the default datastore for the workspace, the
+#' following code will return the same datastore object as the above
+#' example:
+#' ```
+#' ws <- load_workspace_from_config()
+#' ds <- get_datastore(ws, datastore_name = 'workspaceblobstore')
+#' ```
+#' @md
+get_default_datastore <- function(workspace) {
+  workspace$get_default_datastore()
+}
+
+#' Get the default keyvault for a workspace
+#'
+#' @description
+#' Returns a `Keyvault` object representing the default
+#' [Azure Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/key-vault-overview)
+#' associated with the workspace.
+#' @param workspace The `Workspace` object.
+#' @return The `Keyvault` object.
+#' @export
+#' @seealso
+#' `set_secrets()`, `get_secrets()`, `list_secrets()`, `delete_secrets()`
+#' @md
+get_default_keyvault <- function(workspace) {
+  workspace$get_default_keyvault()
+}
+
+#' Get the details of a workspace
+#'
+#' @description
+#' Returns the details of the workspace.
+#' @param workspace The `Workspace` object.
+#' @return A named list of the workspace details.
+#' @export
+#' @section Details:
+#' The returned list contains the following named elements:
+#' * *id*: URI pointing to the workspace resource, containing subscription ID,
+#' resource group, and workspace name.
+#' * *name*: Workspace name.
+#' * *location*: Workspace region.
+#' * *type*: URI of the format `"{providerName}/workspaces"`.
+#' * *workspaceid*: Workspace ID.
+#' * *description*: Workspace description.
+#' * *friendlyName*: Workspace friendly name.
+#' * *creationTime*: Time the workspace was created, in ISO8601.
+#' * *containerRegistry*: Workspace container registry.
+#' * *keyVault*: Workspace key vault.
+#' * *applicationInsights*: Workspace App Insights.
+#' * *identityPrincipalId*: Workspace identity principal ID.
+#' * *identityTenantId*: Workspace tenant ID.
+#' * *identityType*: Workspace identity type.
+#' * *storageAccount*: Workspace storage account.
+#' @md
+get_workspace_details <- function(workspace) {
+  workspace$get_details()
+}
+
+#' Set the default datastore for a workspace
+#'
+#' @description
+#' Set the default datastore associated with the workspace.
+#' @param workspace The `Workspace` object. +#' @param datastore_name The name of the datastore to be set as default. +#' @export +#' @md +set_default_datastore <- function(workspace, datastore_name) { + workspace$set_default_datastore(datastore_name) + invisible(NULL) +} diff --git a/README.md b/README.md new file mode 100644 index 00000000..87cb5d41 --- /dev/null +++ b/README.md @@ -0,0 +1,104 @@ +# Azure Machine Learning SDK for R +| Build | Docs | +|:------|:-----| +| [![Build Status](https://msdata.visualstudio.com/Vienna/_apis/build/status/AzureML-SDK%20R/R%20SDK%20Build?branchName=master)](https://msdata.visualstudio.com/Vienna/_build/latest?definitionId=7523&branchName=master) | [![Build Status](https://msdata.visualstudio.com/Vienna/_apis/build/status/AzureML-SDK%20R/R%20SDK%20Docs?branchName=master)](https://msdata.visualstudio.com/Vienna/_build/latest?definitionId=7950&branchName=master) | + +Data scientists and AI developers use the Azure Machine Learning SDK for R to build and run machine learning workflows with the Azure Machine Learning service. + +Azure Machine Learning SDK for R uses the reticulate package to bind to [Azure Machine Learning's Python SDK](https://docs.microsoft.com/azure/machine-learning/service/overview-what-is-azure-ml). By binding directly to Python, the Azure Machine Learning SDK for R allows you access to core objects and methods implemented in the Python SDK from any R environment you choose. + +Main capabilities of the SDK include: + +- Manage cloud resources for monitoring, logging, and organizing your machine learning experiments. +- Train models using cloud resources, including GPU-accelerated model training. +- Deploy your models as webservices on Azure Container Instances (ACI) and Azure Kubernetes Service (AKS). + +Please take a look at the package website https://azure.github.io/azureml-sdk-for-r for complete documentation. + +## Key Features and Roadmap + +:heavy_check_mark: feature available :arrows_counterclockwise: in progress :clipboard: planned + +| Features | Description | Status | +|----------|-------------|--------| +| [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspaces) | The `Workspace` class is a foundational resource in the cloud that you use to experiment, train, and deploy machine learning models | :heavy_check_mark: | +| [Data Plane Resources](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#datasets-and-datastores) | `Datastore`, which stores connection information to an Azure storage service, and `DataReference`, which describes how and where data should be made available in a run. | :heavy_check_mark: | +| [Compute](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-targets) | Cloud resources where you can train your machine learning models.| :heavy_check_mark: | +| [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiments) | A foundational cloud resource that represents a collection of trials (individual model runs).| :heavy_check_mark: | +| [Run](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#runs) | A `Run` object represents a single trial of an experiment, and is the object that you use to monitor the asynchronous execution of a trial, store the output of the trial, analyze results, and access generated artifacts. 
You use `Run` inside your experimentation code to log metrics and artifacts to the Run History service. | :heavy_check_mark: |
+| [Estimator](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#estimators) | A generic estimator to train data using any supplied training script. | :heavy_check_mark: |
+| [HyperDrive](https://docs.microsoft.com/azure/machine-learning/service/how-to-tune-hyperparameters) | HyperDrive automates the process of running hyperparameter sweeps for an `Experiment`. | :heavy_check_mark: |
+| [Model](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#models) | Cloud representations of machine learning models that help you transfer models between local development environments and the `Workspace` object in the cloud. | :heavy_check_mark: |
+| [Webservice](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#web-service-deployments) | Models can be packaged into container images that include the runtime environment and dependencies. Models must be built into an image before you deploy them as a web service. `Webservice` is the abstract parent class for creating and deploying web services for your models. | :heavy_check_mark: |
+| [Dataset](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-azure-machine-learning-architecture#datasets-and-datastores) | An Azure Machine Learning `Dataset` allows you to explore, transform, and manage your data for various scenarios such as model training and pipeline creation. When you are ready to use the data for training, you can save the Dataset to your Azure ML workspace to get versioning and reproducibility capabilities. | :clipboard: |
+
+## Installation
+
+Install [Conda](https://docs.conda.io/en/latest/miniconda.html) if not already installed. Choose Python 3.5 or later.
+
+To get started, use the `remotes` package to install Azure ML SDK for R from GitHub. As the current repo is not yet public, you will need to [generate a personal access token](https://github.com/settings/tokens) and supply it to the `auth_token` argument. When generating the token, make sure to select the "repo" scope.
+
+```R
+> remotes::install_github('https://github.com/Azure/azureml-sdk-for-r',
+                          auth_token = '<personal-access-token>',
+                          INSTALL_opts=c("--no-multiarch"))
+```
+Then, use `install_azureml()` to install the compiled code from the AzureML Python SDK.
+```R
+> azureml::install_azureml()
+```
+
+Now, you're ready to get started!
+
+For a more detailed walk-through of the installation process, advanced options, and troubleshooting, see our [Installation Guide](https://azure.github.io/azureml-sdk-for-r/articles/installation.html).
+
+## Getting Started
+
+To begin running experiments with Azure Machine Learning, you must establish a connection to your Azure Machine Learning workspace.
+
+1. If you don't already have a workspace created, you can create one by doing:
+
+    ```R
+    # If you haven't already set up a resource group, set `create_resource_group = TRUE`
+    # and set `resource_group` to your desired resource group name in order to create
+    # the resource group in the same step.
+    new_ws <- create_workspace(name = '<workspace-name>',
+                               subscription_id = '<subscription-id>',
+                               resource_group = '<resource-group-name>',
+                               location = '<location>',
+                               create_resource_group = FALSE)
+    ```
+
+    After the workspace is created, you can save it to a configuration file to the local machine.
+
+    ```R
+    write_workspace_config(new_ws)
+    ```
+
+2. If you have an existing workspace associated with your subscription, you can retrieve it from the server by doing:
+
+    ```R
+    existing_ws <- get_workspace(name = '<workspace-name>',
+                                 subscription_id = '<subscription-id>',
+                                 resource_group = '<resource-group-name>')
+    ```
+    Or, if you have the workspace config.json file on your local machine, you can load the workspace by doing:
+
+    ```R
+    loaded_ws <- load_workspace_from_config()
+    ```
+Once you've accessed your workspace, you can begin running and tracking your own experiments with Azure Machine Learning SDK for R.
+
+Take a look at our [code samples](samples/) and [end-to-end vignettes](vignettes/) for examples of what's possible with the SDK!
+
+## Resources
+* R SDK package documentation: https://azure.github.io/azureml-sdk-for-r/reference/index.html
+* Azure Machine Learning service: https://docs.microsoft.com/en-us/azure/machine-learning/service/overview-what-is-azure-ml
+
+## Contribute
+We welcome contributions from the community. If you would like to contribute to the repository, please refer to the [contribution guide](CONTRIBUTING.md).
+
+## Code of Conduct
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/_pkgdown.yml b/_pkgdown.yml
new file mode 100644
index 00000000..c1236faa
--- /dev/null
+++ b/_pkgdown.yml
@@ -0,0 +1,136 @@
+reference:
+- title: Installation
+  desc:
+  contents:
+  - '`install_azureml`'
+- title: Workspaces
+  desc: Functions for managing workspace resources. A **Workspace** is the top-level resource for Azure Machine Learning service. It provides a centralized place to work with all the artifacts you create when you use Azure ML.
+  contents:
+  - '`create_workspace`'
+  - '`get_workspace`'
+  - '`load_workspace_from_config`'
+  - '`write_workspace_config`'
+  - '`get_default_datastore`'
+  - '`set_default_datastore`'
+  - '`delete_workspace`'
+  - '`list_workspaces`'
+  - '`get_workspace_details`'
+  - '`get_default_keyvault`'
+  - '`set_secrets`'
+  - '`get_secrets`'
+  - '`delete_secrets`'
+  - '`list_secrets`'
+- title: Compute targets
+  desc: Functions for managing compute resources. A **Compute Target** is a designated compute resource where you run your scripts or host your service deployments. Compute targets make it easy to change your compute environment without changing your code. Supported compute target types in the R SDK include `AmlCompute` and `AksCompute`.
+  contents:
+  - '`create_aml_compute`'
+  - '`list_nodes_in_aml_compute`'
+  - '`update_aml_compute`'
+  - '`create_aks_compute`'
+  - '`get_aks_compute_credentials`'
+  - '`attach_aks_compute`'
+  - '`detach_aks_compute`'
+  - '`get_compute`'
+  - '`wait_for_provisioning_completion`'
+  - '`list_supported_vm_sizes`'
+  - '`delete_compute`'
+- title: Working with data
+  desc: Functions for managing and accessing data for your machine learning workflows. A **Datastore** is attached to a workspace and is used to store connection information to an Azure storage service. Azure storage services that can be registered as datastores through the R SDK include Azure Blob Container and Azure File Share.
+ contents: + - '`upload_files_to_datastore`' + - '`upload_to_datastore`' + - '`download_from_datastore`' + - '`get_datastore`' + - '`register_azure_blob_container_datastore`' + - '`register_azure_file_share_datastore`' + - '`unregister_datastore`' +- title: Environments + desc: Functions for managing environments. An Azure Machine Learning **Environment** allows you to create, manage, and reuse the software dependencies required for training and deployment. Environments specify the R packages, environment variables, and software settings around your training and scoring scripts for your containerized training runs and deployments. They are managed and versioned entities within your Azure ML workspace that enable reproducible, auditable, and portable machine learning workflows across different compute targets. For more details, see `r_environment()`. + contents: + - '`r_environment`' + - '`register_environment`' + - '`get_environment`' + - '`container_registry`' +- title: Training & experimentation + desc: Functions for managing experiments and runs. An **Experiment** is a grouping of the collection of runs from a specified script. A **Run** represents a single trial of an experiment. A run is the object used to monitor the asynchronous execution of a trial, log metrics and store output of the trial, and to analyze results and access artifacts generated by the trial. The following run types are supported - `ScriptRun` (for Estimator experiments) and `HyperDriveRun` (for HyperDrive experiments). For functions that are specific only to HyperDriveRuns, see the **Hyperparameter tuning** reference sections. An **Estimator** wraps run configuration information for specifying details of executing an R script. Running an Estimator experiment (using `submit_experiment()`) will return a `ScriptRun` object and execute your training script on the specified compute target. + contents: + - '`experiment`' + - '`submit_experiment`' + - '`get_runs_in_experiment`' + - '`estimator`' + - '`wait_for_run_completion`' + - '`get_current_run`' + - '`log_metric_to_run`' + - '`get_run_metrics`' + - '`cancel_run`' + - '`download_file_from_run`' + - '`download_files_from_run`' + - '`get_run`' + - '`get_run_details`' + - '`get_run_details_with_logs`' + - '`get_run_file_names`' + - '`get_secrets_from_run`' + - '`log_accuracy_table_to_run`' + - '`log_confusion_matrix_to_run`' + - '`log_image_to_run`' + - '`log_list_to_run`' + - '`log_predictions_to_run`' + - '`log_residuals_to_run`' + - '`log_row_to_run`' + - '`log_table_to_run`' +- title: Hyperparameter tuning + desc: Functions for configuring and managing hyperparameter tuning (HyperDrive) experiments. Azure ML's HyperDrive functionality enables you to automate hyperparameter tuning of your machine learning models. For example, you can define the parameter search space as discrete or continuous, and a sampling method over the search space as random, grid, or Bayesian. Also, you can specify a primary metric to optimize in the hyperparameter tuning experiment, and whether to minimize or maximize that metric. You can also define early termination policies in which poorly performing experiment runs are canceled and new ones started. 
+  contents:
+  - '`hyperdrive_config`'
+  - '`random_parameter_sampling`'
+  - '`grid_parameter_sampling`'
+  - '`bayesian_parameter_sampling`'
+  - '`choice`'
+  - '`randint`'
+  - '`uniform`'
+  - '`quniform`'
+  - '`loguniform`'
+  - '`qloguniform`'
+  - '`normal`'
+  - '`qnormal`'
+  - '`lognormal`'
+  - '`qlognormal`'
+  - '`primary_metric_goal`'
+  - '`bandit_policy`'
+  - '`median_stopping_policy`'
+  - '`truncation_selection_policy`'
+  - '`get_best_run_by_primary_metric`'
+  - '`get_child_runs_sorted_by_primary_metric`'
+  - '`get_child_run_hyperparameters`'
+  - '`get_child_run_metrics`'
+- title: Model management & deployment
+  desc: Functions for model management and deployment. Registering a model allows you to store and version your trained model in a workspace. A registered **Model** can then be deployed as a **Webservice** using Azure ML. If you would like to access all the assets needed to host a model as a web service without actually deploying the model, you can do so by packaging the model as a `ModelPackage`. You can deploy your model as a `LocalWebservice` (locally), `AciWebservice` (on Azure Container Instances), or `AksWebservice` (on Azure Kubernetes Service).
+  contents:
+  - '`get_model`'
+  - '`register_model`'
+  - '`download_model`'
+  - '`deploy_model`'
+  - '`package_model`'
+  - '`delete_model`'
+  - '`get_model_package_container_registry`'
+  - '`get_model_package_creation_logs`'
+  - '`pull_model_package_image`'
+  - '`save_model_package_files`'
+  - '`wait_for_model_package_creation`'
+  - '`inference_config`'
+  - '`get_webservice`'
+  - '`wait_for_deployment`'
+  - '`get_webservice_logs`'
+  - '`get_webservice_keys`'
+  - '`generate_new_webservice_key`'
+  - '`get_webservice_token`'
+  - '`invoke_webservice`'
+  - '`delete_webservice`'
+  - '`aci_webservice_deployment_config`'
+  - '`update_aci_webservice`'
+  - '`aks_webservice_deployment_config`'
+  - '`update_aks_webservice`'
+  - '`local_webservice_deployment_config`'
+  - '`update_local_webservice`'
+  - '`delete_local_webservice`'
+  - '`reload_local_webservice_assets`'
diff --git a/azureml-sdk-for-r.Rproj b/azureml-sdk-for-r.Rproj
new file mode 100644
index 00000000..21a4da08
--- /dev/null
+++ b/azureml-sdk-for-r.Rproj
@@ -0,0 +1,17 @@
+Version: 1.0
+
+RestoreWorkspace: Default
+SaveWorkspace: Default
+AlwaysSaveHistory: Default
+
+EnableCodeIndexing: Yes
+UseSpacesForTab: Yes
+NumSpacesForTab: 2
+Encoding: UTF-8
+
+RnwWeave: Sweave
+LaTeX: pdfLaTeX
+
+BuildType: Package
+PackageUseDevtools: Yes
+PackageInstallArgs: --no-multiarch --with-keep.source
diff --git a/dev_instruction.md b/dev_instruction.md
new file mode 100644
index 00000000..e81f1339
--- /dev/null
+++ b/dev_instruction.md
@@ -0,0 +1,30 @@
+# Developer instructions on building the `azureml` package
+1. Make sure the packages below are installed.
+    ```
+    install.packages('devtools')
+    ```
+2. Run the following to build the code. The R package file will be created at `package_location`. We can now either upload it to a blob store, publish it to CRAN, or install it directly from the file.
+    ```
+    setwd('<path-to-repo>')
+
+    # Generate .Rd files in man/ and NAMESPACE
+    roxygen2::roxygenise()
+
+    # Build the R package
+    package_location <- devtools::build()
+    ```
+3. To install the package from the `.tar.gz` file in the filesystem, do:
+    ```
+    install.packages(package_location, repos = NULL)
+    ```
+    To install from a url:
+    ```
+    install.packages(package_url, repos = NULL)
+    ```
+
+    If you already have the package loaded in your R session, you may want to
+    remove it from the session to use the new one.
+    This can be done by the following:
+    ```
+    detach("package:azureml", unload = TRUE)
+    ```
diff --git a/docs/404.html b/docs/404.html
new file mode 100644
index 00000000..6bd02e83
--- /dev/null
+++ b/docs/404.html
@@ -0,0 +1,150 @@
+Page not found (404) • azureml
+Content not found. Please use links in the navbar.
+
+Site built with pkgdown 1.4.1.
diff --git a/docs/CODE_OF_CONDUCT.html b/docs/CODE_OF_CONDUCT.html
new file mode 100644
index 00000000..c046aa27
--- /dev/null
+++ b/docs/CODE_OF_CONDUCT.html
@@ -0,0 +1,159 @@
+Microsoft Open Source Code of Conduct • azureml
+This project has adopted the Microsoft Open Source Code of Conduct.
+
+Resources:
+- Microsoft Open Source Code of Conduct
+- Microsoft Code of Conduct FAQ
+- Contact opencode@microsoft.com with questions or concerns
+
+Site built with pkgdown 1.4.1.
diff --git a/docs/CONTRIBUTING.html b/docs/CONTRIBUTING.html
new file mode 100644
index 00000000..416564b9
--- /dev/null
+++ b/docs/CONTRIBUTING.html
@@ -0,0 +1,155 @@
+Contributing • azureml
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
+
+This project has adopted the Microsoft Open Source Code of Conduct. For more information see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments.
+
+Site built with pkgdown 1.4.1.
diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html
new file mode 100644
index 00000000..19aa5001
--- /dev/null
+++ b/docs/LICENSE-text.html
@@ -0,0 +1,171 @@
+License • azureml
    MIT License
+
+    Copyright (c) Microsoft Corporation. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
+
+Site built with pkgdown 1.4.1.
diff --git a/docs/articles/cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.html b/docs/articles/cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.html
new file mode 100644
index 00000000..666ea811
--- /dev/null
+++ b/docs/articles/cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.html
@@ -0,0 +1,288 @@
+Hyperparameter Tuning a Keras Model with HyperDrive • azureml
+This article demonstrates how you can efficiently tune hyperparameters for a model using AzureML SDK for R. We will train a Keras model on the CIFAR10 dataset, automate hyperparameter exploration, launch parallel jobs, log our results, and find the best run using AzureML’s HyperDrive service.
+
+What are hyperparameters?
+
+Hyperparameters are variable parameters chosen to train a model. Learning rate, number of epochs, and batch size are all examples of hyperparameters.
+
+Using brute-force methods to find the optimal values for parameters can be time-consuming, and poor-performing runs can result in wasted money. To avoid this, HyperDrive automates hyperparameter exploration in a time-saving and cost-effective manner by launching several parallel runs with different configurations and finding the configuration that results in the best performance on your primary metric.
+
+Let’s get started with the example to see how it works!
+
+1. Set up the experiment
+
+First, we will prepare for training by loading the required package, initializing a workspace, and creating an experiment.
+
+Import package
+
+library("azureml")
+
+Initialize a workspace
+
+The Workspace is the top-level resource for the service. It provides us with a centralized place to work with all the artifacts we will create.
+
+You can create a Workspace object from a local config.json file
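+A sketch of that, assuming the config file was written earlier with write_workspace_config():
+
+ws <- load_workspace_from_config()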

+Or load an existing workspace from your Azure Machine Learning account
+
+ws <- get_workspace("<your workspace name>", "<your subscription ID>", "<your resource group>")
+
+Create a deep learning experiment
+
+For this example, we will create an experiment named “hyperdrive-cifar10”.
+
+exp <- experiment(workspace = ws, name = 'hyperdrive-cifar10')
+
+2. Create a compute target
+
+Now, we will create a compute target for our job to run on. In this example, we are creating a GPU-enabled compute cluster.
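+A hedged sketch of one way to do this (the cluster name and VM size below are illustrative, not prescribed by the original article):
+
+compute_target <- create_aml_compute(ws, cluster_name = "gpucluster",
+                                     vm_size = "STANDARD_NC6",
+                                     max_nodes = 4)
+wait_for_provisioning_completion(compute_target)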
+
+3. Prepare training script
+
+In order to collect and upload run metrics, we need to import the azureml package at the top of our training script, “cifar10_cnn.R”.
+
+library("azureml")
+
+Then, we need to edit our script so that it can log our parameters. We will use the log_metric_to_run function to log our hyperparameters at the top of the script and to log our primary metric at the bottom of the script.
+
+current_run <- get_current_run()
+...
+log_metric_to_run("batch_size", batch_size, current_run)
+...
+log_metric_to_run("epochs", epochs, current_run)
+...
+log_metric_to_run("lr", lr, current_run)
+...
+log_metric_to_run("decay", decay, current_run)
+
+log_metric_to_run("Loss", results[[1]], current_run)
+
+4. Create an estimator
+
+An Estimator offers a simple way to launch a training job on a compute target.
+
+Our training script will need the Keras package to run, and we can have it installed in the Docker container where our job will run by passing the package name via the cran_packages parameter.
+
+est <- estimator(source_directory = ".", entry_script = "cifar10_cnn.R",
+                 compute_target = compute_target, cran_packages = c("keras"))
+
+5. Set HyperDrive configuration

+
+

+Define search space

+

In this experiment, we will use four hyperparameters: batch size, number of epochs, learning rate, and decay. In order to begin tuning, we must define the range of values we would like to pull from and how they will be distributed. This is called a parameter space definition and can be created with discrete or continuous ranges.

+

Discrete hyperparameters are specified as a choice among discrete values represented as a list.

+

Advanced discrete hyperparameters can also be specified using a distribution. The following distributions are supported:

  • quniform()
  • qloguniform()
  • qnormal()
  • qlognormal()

Continuous hyperparameters are specified as a distribution over a continuous range of values. The following distributions are supported:

  • uniform()
  • loguniform()
  • normal()
  • lognormal()

Here, we will use the random_parameter_sampling function to define the search space for each hyperparameter. batch_size and epochs will be chosen from discrete sets while lr and decay will be drawn from continuous distributions.

+

Other sampling function options are:

  • grid_parameter_sampling()
  • bayesian_parameter_sampling()
sampling <- random_parameter_sampling(list(batch_size = choice(c(16, 32, 64)),
+                                           epochs = choice(c(200, 350, 500)),
+                                           lr = normal(0.0001, 0.005),
+                                           decay = uniform(1e-6, 3e-6)))
+
+
+

+Define termination policy

+

To prevent resource waste, we should detect and terminate poorly performing runs. HyperDrive will do this automatically if we set up an early termination policy.

+

Here, we will use the bandit_policy, which terminates any run whose primary metric is not within the specified slack factor with respect to the best performing training run.

+
policy <- bandit_policy(slack_factor = 0.15)
+

Other termination policy options are:

  • median_stopping_policy()
  • truncation_selection_policy()

If no policy is provided, all runs will continue to completion regardless of performance.

+
+
+

+Finalize configuration

+

Now, we can create a HyperDriveConfig object to define our group of jobs. Along with our sampling and policy definitions, we need to specify the name of the primary metric that we want to track and whether we want to maximize it or minimize it.

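A sketch of the configuration, assuming the hyperdrive_config() helper mirrors the Python SDK's HyperDriveConfig parameters (the run limit is an illustrative choice):

hyperdrive_config <- hyperdrive_config(hyperparameter_sampling = sampling,
                                       primary_metric_name = "Loss",
                                       primary_metric_goal = primary_metric_goal("MINIMIZE"),
                                       policy = policy,
                                       estimator = est,
                                       max_total_runs = 8)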
+
+
+

+6. Submit HyperDrive run

+

Submitting our experiment will start multiple simultaneous runs and return a HyperDriveRun object that we will use to interface with the run history during and after the job.

+
hyperdrive_run <- submit_experiment(exp, hyperdrive_config)
+wait_for_run_completion(hyperdrive_run, show_output = TRUE)
+
+
+

+7. Analyze runs by performance

+

Finally, we can view and compare the metrics collected across all of our child runs!

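For example, assuming child-run helpers along these lines are exported by the SDK:

# Metrics logged by each child run, indexed by run id
child_run_metrics <- get_child_run_metrics(hyperdrive_run)

# The run that performed best on the primary metric ("Loss")
best_run <- get_best_run_by_primary_metric(hyperdrive_run)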
+
diff --git a/docs/articles/index.html b/docs/articles/index.html new file mode 100644 index 00000000..8b02e618 --- /dev/null +++ b/docs/articles/index.html @@ -0,0 +1,156 @@
+Articles • azureml
diff --git a/docs/articles/installation.html b/docs/articles/installation.html new file mode 100644 index 00000000..461ea59e --- /dev/null +++ b/docs/articles/installation.html @@ -0,0 +1,205 @@
+Installing AzureML SDK for R • azureml
+

+1. Install Anaconda

+

If you do not have Anaconda already installed on your machine, you will first need to install it. Choose the 64-bit binary for Python 3.5 or later.

+
+
+

+2. Install azureml R package with remotes +

+

As the azureml package is not available on CRAN, you will need the remotes package to install it from the GitHub repo.

+
install.packages('remotes')
+

Then, you can use the install_github function to install the package. Until the current repo is opened to the public, you will need to use a generated personal access token. When generating the token, make sure to select the “repo” scope.

remotes::install_github('https://github.com/Azure/azureml-sdk-for-r',
                        build_vignettes = TRUE,
                        auth_token = '<your personal access token>',
                        INSTALL_opts=c("--no-multiarch"))

Set build_vignettes to TRUE if you would like the vignettes to be downloaded along with the main package.

+
+
+

+3. Install AzureML Python SDK

+

Lastly, use the azureml R library to install the compiled code. By default, install_azureml will install the latest version of the AzureML Python SDK in a conda environment called r-azureml. If you would like to override the default version, environment name, or python version, you can pass in those arguments:

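For instance (the argument names below are assumptions based on the defaults described above, and the version string is hypothetical):

install_azureml(version = "<sdk version>",
                envname = "my-r-azureml",
                conda_python_version = "3.6")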
+
+

+4. Test installation

+

You can confirm your installation worked by loading the library and successfully retrieving a run.

library("azureml")
get_current_run()
## <azureml.core.run._OfflineRun>
+
+
+

+Troubleshooting

diff --git a/docs/articles/train-with-tensorflow/train-with-tensorflow.html b/docs/articles/train-with-tensorflow/train-with-tensorflow.html new file mode 100644 index 00000000..b704148c --- /dev/null +++ b/docs/articles/train-with-tensorflow/train-with-tensorflow.html @@ -0,0 +1,212 @@
+Training a TensorFlow Model on MNIST • azureml
+

This article demonstrates how to run a TensorFlow training script at scale using AzureML SDK for R. We will train a TensorFlow model to classify handwritten digits using a deep neural network (DNN) and log our results to the Azure Machine Learning service.

+
+

+1. Set up the experiment

+

Let’s prepare for training by loading the required package, initializing a workspace, and creating an experiment.

+
+

+Import package

+
library("azureml")
+
+
+

+Initialize a workspace

+

The Workspace is the top-level resource for the service. It provides us with a centralized place to work with all the artifacts we will create.

+

You can create a Workspace object from a local config.json file

ws <- load_workspace_from_config()

Or load an existing workspace from your Azure Machine Learning account

+
ws <- get_workspace("<your workspace name>", "<your subscription ID>", "<your resource group>")
+
+
+

+Create an experiment

+

For this example, we will create an Experiment called “tf-mnist”.

+
exp <- experiment(workspace = ws, name = 'tf-mnist')
+
+
+
+

+2. Create a compute target

+

Now, we will create a compute target for our TensorFlow job to run on. In this example, we are creating a CPU-enabled compute cluster.

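A minimal sketch, following the create_aml_compute() example from the reference documentation (the cluster name and node count are illustrative):

compute_target <- create_aml_compute(workspace = ws,
                                     cluster_name = 'mycluster',
                                     vm_size = 'STANDARD_D2_V2',
                                     max_nodes = 1)
wait_for_provisioning_completion(compute_target, show_output = TRUE)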
+
+

+3. Prepare training script

+

In order to collect and upload run metrics, we need to import the azureml package at the top of our training script, “tf_mnist.R”.

+
library("azureml")
+

Then, we need to add the log_metric_to_run function to track our primary metric, “accuracy”, for this experiment. If you have your own training script with several important metrics, simply create a logging call for each one within the script.

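Following the same pattern as the HyperDrive vignette, the logging call might look like this (where accuracy is the value computed by the training script):

current_run <- get_current_run()
...
log_metric_to_run("accuracy", accuracy, current_run)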
+
+

+4. Create an estimator

+

An Estimator offers a simple way to launch a training job on a compute target. Our training script will need the TensorFlow package to run, and we can have it installed in the Docker container where our job will run by passing the package name to the cran_packages parameter.

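A sketch mirroring the Keras estimator from the HyperDrive vignette:

est <- estimator(source_directory = ".", entry_script = "tf_mnist.R",
                 compute_target = compute_target, cran_packages = c("tensorflow"))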
+
+

+5. Submit a run

+

Submitting our experiment will return a Run object that we will use to interface with the run history during and after the job.

+
run <- submit_experiment(exp, est)
+wait_for_run_completion(run, show_output = TRUE)
+
+

+6. View metrics

+

Finally, we can view the metrics collected during our TensorFlow run!

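For example, assuming a get_run_metrics() helper analogous to the logging functions above:

metrics <- get_run_metrics(run)
metrics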
+
+
diff --git a/docs/authors.html b/docs/authors.html new file mode 100644 index 00000000..b18ef024 --- /dev/null +++ b/docs/authors.html @@ -0,0 +1,171 @@
+Authors • azureml
+
  • Himanshu Chandola. Author.
  • Billy Hu. Author, maintainer.
  • Heemanshu Suri. Author.
  • Diondra Peck. Author.
  • Microsoft. Copyright holder, funder.
diff --git a/docs/dev_instruction.html b/docs/dev_instruction.html new file mode 100644 index 00000000..1779ced3 --- /dev/null +++ b/docs/dev_instruction.html @@ -0,0 +1,174 @@
+Developer instructions on building azureml package • azureml
+
1. Make sure the packages below are installed:

    install.packages('devtools')

2. Run the following to build the code. The R package file will be created at package_location. We can then either upload it to a blob store, publish it to CRAN, or install it directly from the file.

    setwd('<repo_root>')

    # Generate .Rd files in man/ and NAMESPACE
    roxygen2::roxygenise()

    # Build the R package
    package_location <- devtools::build()

3. To install the package from the .tar.gz file in the filesystem, do:

    install.packages(package_location, repos = NULL)

    To install from a URL:

    install.packages(package_url, repos = NULL)

4. If you already have the package loaded in your R session, you may want to remove it from the session to use the new one. This can be done by the following:

    detach("package:azureml", unload = TRUE)
+ + + + + + + + diff --git a/docs/docsearch.css b/docs/docsearch.css new file mode 100644 index 00000000..e5f1fe1d --- /dev/null +++ b/docs/docsearch.css @@ -0,0 +1,148 @@ +/* Docsearch -------------------------------------------------------------- */ +/* + Source: https://github.com/algolia/docsearch/ + License: MIT +*/ + +.algolia-autocomplete { + display: block; + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1 +} + +.algolia-autocomplete .ds-dropdown-menu { + width: 100%; + min-width: none; + max-width: none; + padding: .75rem 0; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, .1); + box-shadow: 0 .5rem 1rem rgba(0, 0, 0, .175); +} + +@media (min-width:768px) { + .algolia-autocomplete .ds-dropdown-menu { + width: 175% + } +} + +.algolia-autocomplete .ds-dropdown-menu::before { + display: none +} + +.algolia-autocomplete .ds-dropdown-menu [class^=ds-dataset-] { + padding: 0; + background-color: rgb(255,255,255); + border: 0; + max-height: 80vh; +} + +.algolia-autocomplete .ds-dropdown-menu .ds-suggestions { + margin-top: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion { + padding: 0; + overflow: visible +} + +.algolia-autocomplete .algolia-docsearch-suggestion--category-header { + padding: .125rem 1rem; + margin-top: 0; + font-size: 1.3em; + font-weight: 500; + color: #00008B; + border-bottom: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--wrapper { + float: none; + padding-top: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column { + float: none; + width: auto; + padding: 0; + text-align: left +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content { + float: none; + width: auto; + padding: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content::before { + display: none +} + +.algolia-autocomplete .ds-suggestion:not(:first-child) .algolia-docsearch-suggestion--category-header { + padding-top: .75rem; + margin-top: .75rem; + border-top: 1px solid rgba(0, 0, 0, .1) +} + +.algolia-autocomplete .ds-suggestion .algolia-docsearch-suggestion--subcategory-column { + display: block; + padding: .1rem 1rem; + margin-bottom: 0.1; + font-size: 1.0em; + font-weight: 400 + /* display: none */ +} + +.algolia-autocomplete .algolia-docsearch-suggestion--title { + display: block; + padding: .25rem 1rem; + margin-bottom: 0; + font-size: 0.9em; + font-weight: 400 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--text { + padding: 0 1rem .5rem; + margin-top: -.25rem; + font-size: 0.8em; + font-weight: 400; + line-height: 1.25 +} + +.algolia-autocomplete .algolia-docsearch-footer { + width: 110px; + height: 20px; + z-index: 3; + margin-top: 10.66667px; + float: right; + font-size: 0; + line-height: 0; +} + +.algolia-autocomplete .algolia-docsearch-footer--logo { + background-image: url("data:image/svg+xml;utf8,"); + background-repeat: no-repeat; + background-position: 50%; + background-size: 100%; + overflow: hidden; + text-indent: -9000px; + width: 100%; + height: 100%; + display: block; + transform: translate(-8px); +} + +.algolia-autocomplete .algolia-docsearch-suggestion--highlight { + color: #FF8C00; + background: rgba(232, 189, 54, 0.1) +} + + +.algolia-autocomplete .algolia-docsearch-suggestion--text .algolia-docsearch-suggestion--highlight { + box-shadow: inset 0 -2px 0 0 rgba(105, 105, 105, .5) +} + +.algolia-autocomplete .ds-suggestion.ds-cursor .algolia-docsearch-suggestion--content { + background-color: rgba(192, 192, 192, .15) +} diff --git a/docs/docsearch.js 
b/docs/docsearch.js new file mode 100644 index 00000000..b35504cd --- /dev/null +++ b/docs/docsearch.js @@ -0,0 +1,85 @@ +$(function() { + + // register a handler to move the focus to the search bar + // upon pressing shift + "/" (i.e. "?") + $(document).on('keydown', function(e) { + if (e.shiftKey && e.keyCode == 191) { + e.preventDefault(); + $("#search-input").focus(); + } + }); + + $(document).ready(function() { + // do keyword highlighting + /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */ + var mark = function() { + + var referrer = document.URL ; + var paramKey = "q" ; + + if (referrer.indexOf("?") !== -1) { + var qs = referrer.substr(referrer.indexOf('?') + 1); + var qs_noanchor = qs.split('#')[0]; + var qsa = qs_noanchor.split('&'); + var keyword = ""; + + for (var i = 0; i < qsa.length; i++) { + var currentParam = qsa[i].split('='); + + if (currentParam.length !== 2) { + continue; + } + + if (currentParam[0] == paramKey) { + keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20")); + } + } + + if (keyword !== "") { + $(".contents").unmark({ + done: function() { + $(".contents").mark(keyword); + } + }); + } + } + }; + + mark(); + }); +}); + +/* Search term highlighting ------------------------------*/ + +function matchedWords(hit) { + var words = []; + + var hierarchy = hit._highlightResult.hierarchy; + // loop to fetch from lvl0, lvl1, etc. + for (var idx in hierarchy) { + words = words.concat(hierarchy[idx].matchedWords); + } + + var content = hit._highlightResult.content; + if (content) { + words = words.concat(content.matchedWords); + } + + // return unique words + var words_uniq = [...new Set(words)]; + return words_uniq; +} + +function updateHitURL(hit) { + + var words = matchedWords(hit); + var url = ""; + + if (hit.anchor) { + url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor; + } else { + url = hit.url + '?q=' + escape(words.join(" ")); + } + + return url; +} diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 00000000..af76f4a9 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,298 @@ + + + + + + + +R interface to AzureML SDK • azureml + + + + + + + + + + +
+
+

Data scientists and AI developers use the Azure Machine Learning SDK for R to build and run machine learning workflows with the Azure Machine Learning service.

+

Azure Machine Learning SDK for R uses the reticulate package to bind to Azure Machine Learning’s Python SDK. By binding directly to Python, the Azure Machine Learning SDK for R gives you access to core objects and methods implemented in the Python SDK from any R environment you choose.

+

Main capabilities of the SDK include:

+
  • Manage cloud resources for monitoring, logging, and organizing your machine learning experiments.
  • Train models using cloud resources, including GPU-accelerated model training.
  • Deploy your models as webservices on Azure Container Instances (ACI) and Azure Kubernetes Service (AKS).
+

Please take a look at the package website https://azure.github.io/azureml-sdk-for-r for complete documentation.

+
+

+Key Features and Roadmap

+

✔️ feature available 🔄 in progress 📋 planned

Features | Description | Status
Workspace | The Workspace class is a foundational resource in the cloud that you use to experiment, train, and deploy machine learning models. | ✔️
Data Plane Resources | Datastore, which stores connection information to an Azure storage service, and DataReference, which describes how and where data should be made available in a run. | ✔️
Compute | Cloud resources where you can train your machine learning models. | ✔️
Experiment | A foundational cloud resource that represents a collection of trials (individual model runs). | ✔️
Run | A Run object represents a single trial of an experiment, and is the object that you use to monitor the asynchronous execution of a trial, store the output of the trial, analyze results, and access generated artifacts. You use Run inside your experimentation code to log metrics and artifacts to the Run History service. | ✔️
Estimator | A generic estimator to train data using any supplied training script. | ✔️
HyperDrive | HyperDrive automates the process of running hyperparameter sweeps for an Experiment. | ✔️
Model | Cloud representations of machine learning models that help you transfer models between local development environments and the Workspace object in the cloud. | ✔️
Webservice | Models can be packaged into container images that include the runtime environment and dependencies. Models must be built into an image before you deploy them as a web service. Webservice is the abstract parent class for creating and deploying web services for your models. | ✔️
Dataset | An Azure Machine Learning Dataset allows you to explore, transform, and manage your data for various scenarios such as model training and pipeline creation. When you are ready to use the data for training, you can save the Dataset to your Azure ML workspace to get versioning and reproducibility capabilities. | 📋
+
+
+

+Installation

+

Install Conda if not already installed. Choose Python 3.5 or later.

+

To get started, use the remotes package to install Azure ML SDK for R from GitHub. As the current repo is not yet public, you will need to generate a personal access token and supply it to the auth_token argument. When generating the token, make sure to select the “repo” scope.

+
> remotes::install_github('https://github.com/Azure/azureml-sdk-for-r',
+                           auth_token = '<your personal access token>',
+                           INSTALL_opts=c("--no-multiarch"))
+

Then, use install_azureml() to install the compiled code from the AzureML Python SDK.

install_azureml()

Now, you’re ready to get started!

+

For a more detailed walk-through of the installation process, advanced options, and troubleshooting, see our Installation Guide.

+
+
+

+Getting Started

+

To begin running experiments with Azure Machine Learning, you must establish a connection to your Azure Machine Learning workspace.

+
1. If you don't already have a workspace created, you can create one by doing:

    ws <- create_workspace(name = '<your workspace name>',
                           subscription_id = '<your subscription ID>',
                           resource_group = '<your resource group>')

    After the workspace is created, you can save it to a configuration file on the local machine (e.g., with write_workspace_config(ws)).

2. If you have an existing workspace associated with your subscription, you can retrieve it from the server by doing:

    ws <- get_workspace('<your workspace name>', '<your subscription ID>', '<your resource group>')

    Or, if you have the workspace config.json file on your local machine, you can load the workspace by doing:

    ws <- load_workspace_from_config()

    Once you've accessed your workspace, you can begin running and tracking your own experiments with Azure Machine Learning SDK for R.
+

Take a look at our code samples and end-to-end vignettes for examples of what’s possible with the SDK!

+
+

+Contribute

+

We welcome contributions from the community. If you would like to contribute to the repository, please refer to the contribution guide.

+
+
+

+Code of Conduct

+

This project has adopted the Microsoft Open Source Code of Conduct. For more information see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments.

+
+
+
+ + + + + + diff --git a/docs/link.svg b/docs/link.svg new file mode 100644 index 00000000..88ad8276 --- /dev/null +++ b/docs/link.svg @@ -0,0 +1,12 @@ + + + + + + diff --git a/docs/pkgdown.css b/docs/pkgdown.css new file mode 100644 index 00000000..91459581 --- /dev/null +++ b/docs/pkgdown.css @@ -0,0 +1,256 @@ +/* Sticky footer */ + +/** + * Basic idea: https://philipwalton.github.io/solved-by-flexbox/demos/sticky-footer/ + * Details: https://github.com/philipwalton/solved-by-flexbox/blob/master/assets/css/components/site.css + * + * .Site -> body > .container + * .Site-content -> body > .container .row + * .footer -> footer + * + * Key idea seems to be to ensure that .container and __all its parents__ + * have height set to 100% + * + */ + +html, body { + height: 100%; +} + +body > .container { + display: flex; + height: 100%; + flex-direction: column; +} + +body > .container .row { + flex: 1 0 auto; +} + +footer { + margin-top: 45px; + padding: 35px 0 36px; + border-top: 1px solid #e5e5e5; + color: #666; + display: flex; + flex-shrink: 0; +} +footer p { + margin-bottom: 0; +} +footer div { + flex: 1; +} +footer .pkgdown { + text-align: right; +} +footer p { + margin-bottom: 0; +} + +img.icon { + float: right; +} + +img { + max-width: 100%; +} + +/* Fix bug in bootstrap (only seen in firefox) */ +summary { + display: list-item; +} + +/* Typographic tweaking ---------------------------------*/ + +.contents .page-header { + margin-top: calc(-60px + 1em); +} + +/* Section anchors ---------------------------------*/ + +a.anchor { + margin-left: -30px; + display:inline-block; + width: 30px; + height: 30px; + visibility: hidden; + + background-image: url(./link.svg); + background-repeat: no-repeat; + background-size: 20px 20px; + background-position: center center; +} + +.hasAnchor:hover a.anchor { + visibility: visible; +} + +@media (max-width: 767px) { + .hasAnchor:hover a.anchor { + visibility: hidden; + } +} + + +/* Fixes for fixed navbar --------------------------*/ + +.contents h1, .contents h2, .contents h3, .contents h4 { + padding-top: 60px; + margin-top: -40px; +} + +/* Sidebar --------------------------*/ + +#sidebar { + margin-top: 30px; + position: -webkit-sticky; + position: sticky; + top: 70px; +} +#sidebar h2 { + font-size: 1.5em; + margin-top: 1em; +} + +#sidebar h2:first-child { + margin-top: 0; +} + +#sidebar .list-unstyled li { + margin-bottom: 0.5em; +} + +.orcid { + height: 16px; + /* margins are required by official ORCID trademark and display guidelines */ + margin-left:4px; + margin-right:4px; + vertical-align: middle; +} + +/* Reference index & topics ----------------------------------------------- */ + +.ref-index th {font-weight: normal;} + +.ref-index td {vertical-align: top;} +.ref-index .icon {width: 40px;} +.ref-index .alias {width: 40%;} +.ref-index-icons .alias {width: calc(40% - 40px);} +.ref-index .title {width: 60%;} + +.ref-arguments th {text-align: right; padding-right: 10px;} +.ref-arguments th, .ref-arguments td {vertical-align: top;} +.ref-arguments .name {width: 20%;} +.ref-arguments .desc {width: 80%;} + +/* Nice scrolling for wide elements --------------------------------------- */ + +table { + display: block; + overflow: auto; +} + +/* Syntax highlighting ---------------------------------------------------- */ + +pre { + word-wrap: normal; + word-break: normal; + border: 1px solid #eee; +} + +pre, code { + background-color: #f8f8f8; + color: #333; +} + +pre code { + overflow: auto; + word-wrap: normal; + white-space: pre; +} + +pre .img { + 
margin: 5px 0; +} + +pre .img img { + background-color: #fff; + display: block; + height: auto; +} + +code a, pre a { + color: #375f84; +} + +a.sourceLine:hover { + text-decoration: none; +} + +.fl {color: #1514b5;} +.fu {color: #000000;} /* function */ +.ch,.st {color: #036a07;} /* string */ +.kw {color: #264D66;} /* keyword */ +.co {color: #888888;} /* comment */ + +.message { color: black; font-weight: bolder;} +.error { color: orange; font-weight: bolder;} +.warning { color: #6A0366; font-weight: bolder;} + +/* Clipboard --------------------------*/ + +.hasCopyButton { + position: relative; +} + +.btn-copy-ex { + position: absolute; + right: 0; + top: 0; + visibility: hidden; +} + +.hasCopyButton:hover button.btn-copy-ex { + visibility: visible; +} + +/* headroom.js ------------------------ */ + +.headroom { + will-change: transform; + transition: transform 200ms linear; +} +.headroom--pinned { + transform: translateY(0%); +} +.headroom--unpinned { + transform: translateY(-100%); +} + +/* mark.js ----------------------------*/ + +mark { + background-color: rgba(255, 255, 51, 0.5); + border-bottom: 2px solid rgba(255, 153, 51, 0.3); + padding: 1px; +} + +/* vertical spacing after htmlwidgets */ +.html-widget { + margin-bottom: 10px; +} + +/* fontawesome ------------------------ */ + +.fab { + font-family: "Font Awesome 5 Brands" !important; +} + +/* don't display links in code chunks when printing */ +/* source: https://stackoverflow.com/a/10781533 */ +@media print { + code a:link:after, code a:visited:after { + content: ""; + } +} diff --git a/docs/pkgdown.js b/docs/pkgdown.js new file mode 100644 index 00000000..087a7622 --- /dev/null +++ b/docs/pkgdown.js @@ -0,0 +1,113 @@ +/* http://gregfranko.com/blog/jquery-best-practices/ */ +(function($) { + $(function() { + + $('.navbar-fixed-top').headroom(); + + $('body').css('padding-top', $('.navbar').height() + 10); + $(window).resize(function(){ + $('body').css('padding-top', $('.navbar').height() + 10); + }); + + $('body').scrollspy({ + target: '#sidebar', + offset: 60 + }); + + $('[data-toggle="tooltip"]').tooltip(); + + var cur_path = paths(location.pathname); + var links = $("#navbar ul li a"); + var max_length = -1; + var pos = -1; + for (var i = 0; i < links.length; i++) { + if (links[i].getAttribute("href") === "#") + continue; + // Ignore external links + if (links[i].host !== location.host) + continue; + + var nav_path = paths(links[i].pathname); + + var length = prefix_length(nav_path, cur_path); + if (length > max_length) { + max_length = length; + pos = i; + } + } + + // Add class to parent
  • , and enclosing
  • if in dropdown + if (pos >= 0) { + var menu_anchor = $(links[pos]); + menu_anchor.parent().addClass("active"); + menu_anchor.closest("li.dropdown").addClass("active"); + } + }); + + function paths(pathname) { + var pieces = pathname.split("/"); + pieces.shift(); // always starts with / + + var end = pieces[pieces.length - 1]; + if (end === "index.html" || end === "") + pieces.pop(); + return(pieces); + } + + // Returns -1 if not found + function prefix_length(needle, haystack) { + if (needle.length > haystack.length) + return(-1); + + // Special case for length-0 haystack, since for loop won't run + if (haystack.length === 0) { + return(needle.length === 0 ? 0 : -1); + } + + for (var i = 0; i < haystack.length; i++) { + if (needle[i] != haystack[i]) + return(i); + } + + return(haystack.length); + } + + /* Clipboard --------------------------*/ + + function changeTooltipMessage(element, msg) { + var tooltipOriginalTitle=element.getAttribute('data-original-title'); + element.setAttribute('data-original-title', msg); + $(element).tooltip('show'); + element.setAttribute('data-original-title', tooltipOriginalTitle); + } + + if(ClipboardJS.isSupported()) { + $(document).ready(function() { + var copyButton = ""; + + $(".examples, div.sourceCode").addClass("hasCopyButton"); + + // Insert copy buttons: + $(copyButton).prependTo(".hasCopyButton"); + + // Initialize tooltips: + $('.btn-copy-ex').tooltip({container: 'body'}); + + // Initialize clipboard: + var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', { + text: function(trigger) { + return trigger.parentNode.textContent; + } + }); + + clipboardBtnCopies.on('success', function(e) { + changeTooltipMessage(e.trigger, 'Copied!'); + e.clearSelection(); + }); + + clipboardBtnCopies.on('error', function() { + changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); + }); + }); + } +})(window.jQuery || window.$) diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml new file mode 100644 index 00000000..7d256fa4 --- /dev/null +++ b/docs/pkgdown.yml @@ -0,0 +1,8 @@ +pandoc: 2.2.3.2 +pkgdown: 1.4.1 +pkgdown_sha: ~ +articles: + cnn-tuning-with-hyperdrive: cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.html + installation: installation.html + train-with-tensorflow: train-with-tensorflow/train-with-tensorflow.html + diff --git a/docs/reference/aci_webservice_deployment_config.html b/docs/reference/aci_webservice_deployment_config.html new file mode 100644 index 00000000..33c12ed5 --- /dev/null +++ b/docs/reference/aci_webservice_deployment_config.html @@ -0,0 +1,247 @@ + + + + + + + + +Create a configuration object for deploying an ACI Webservice. — aci_webservice_deployment_config • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Create a configuration object for deploying an ACI Webservice.

    + +
    + +
    aci_webservice_deployment_config(cpu_cores = NULL, memory_gb = NULL,
    +  tags = NULL, properties = NULL, description = NULL,
    +  location = NULL, auth_enabled = NULL, ssl_enabled = NULL,
    +  enable_app_insights = NULL, ssl_cert_pem_file = NULL,
    +  ssl_key_pem_file = NULL, ssl_cname = NULL, dns_name_label = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    cpu_cores

    The number of cpu cores to allocate for this Webservice. +Can be a decimal.

    memory_gb

    The amount of memory (in GB) to allocate for this +Webservice. Can be a decimal. Defaults to 0.5

    tags

    Dictionary of key value tags to give this Webservice

    properties

    Dictionary of key value properties to give this +Webservice. These properties cannot be changed after deployment, however +new key value pairs can be added

    description

    A description to give this Webservice

    location

    The Azure region to deploy this Webservice to. If not +specified the Workspace location will be used. More details on available +regions can be found here: +https://azure.microsoft.com/en-us/global-infrastructure/services/?regions=all&products=container-instances

    auth_enabled

    Whether or not to enable auth for this Webservice. +Defaults to FALSE

    ssl_enabled

    Whether or not to enable SSL for this Webservice. +Defaults to FALSE

    enable_app_insights

    Whether or not to enable AppInsights for this +Webservice. Defaults to FALSE

    ssl_cert_pem_file

    The cert file needed if SSL is enabled

    ssl_key_pem_file

    The key file needed if SSL is enabled

    ssl_cname

    The cname for if SSL is enabled

    dns_name_label

    The dns name label for the scoring endpoint. +If not specified a unique dns name label will be generated for the scoring +endpoint.

    + +

    Value

    + +

    AciServiceDeploymentConfiguration object to use when deploying a +Webservice object

    + + + + + + + + diff --git a/docs/reference/aks_webservice_deployment_config.html b/docs/reference/aks_webservice_deployment_config.html new file mode 100644 index 00000000..58dc4bda --- /dev/null +++ b/docs/reference/aks_webservice_deployment_config.html @@ -0,0 +1,324 @@ + + + + + + + + +Create a configuration object for deploying to an AKS compute target. — aks_webservice_deployment_config • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Create a configuration object for deploying to an AKS compute target.

    + +
    + +
    aks_webservice_deployment_config(autoscale_enabled = NULL,
    +  autoscale_min_replicas = NULL, autoscale_max_replicas = NULL,
    +  autoscale_refresh_seconds = NULL,
    +  autoscale_target_utilization = NULL, auth_enabled = NULL,
    +  cpu_cores = NULL, memory_gb = NULL, enable_app_insights = NULL,
    +  scoring_timeout_ms = NULL, replica_max_concurrent_requests = NULL,
    +  max_request_wait_time = NULL, num_replicas = NULL,
    +  primary_key = NULL, secondary_key = NULL, tags = NULL,
    +  properties = NULL, description = NULL, gpu_cores = NULL,
    +  period_seconds = NULL, initial_delay_seconds = NULL,
    +  timeout_seconds = NULL, success_threshold = NULL,
    +  failure_threshold = NULL, namespace = NULL,
    +  token_auth_enabled = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    autoscale_enabled

Whether or not to enable autoscaling for this +Webservice. Defaults to TRUE if num_replicas is NULL.

    autoscale_min_replicas

    The minimum number of containers to use when +autoscaling this Webservice. Defaults to 1

    autoscale_max_replicas

    The maximum number of containers to use when +autoscaling this Webservice. Defaults to 10

    autoscale_refresh_seconds

    How often the autoscaler should attempt to +scale this Webservice. Defaults to 1

    autoscale_target_utilization

    The target utilization (in percent out +of 100) the autoscaler should attempt to maintain for this Webservice. +Defaults to 70

    auth_enabled

    Whether or not to enable key auth for this Webservice. +Defaults to TRUE

    cpu_cores

    The number of cpu cores to allocate for this Webservice. +Can be a decimal. Defaults to 0.1

    memory_gb

    The amount of memory (in GB) to allocate for this +Webservice. Can be a decimal. Defaults to 0.5

    enable_app_insights

    Whether or not to enable Application Insights +logging for this Webservice. Defaults to FALSE

    scoring_timeout_ms

    A timeout to enforce for scoring calls to this +Webservice. Defaults to 60000

    replica_max_concurrent_requests

    The number of maximum concurrent +requests per node to allow for this Webservice. Defaults to 1

    max_request_wait_time

    The maximum amount of time a request will stay +in the queue (in milliseconds) before returning a 503 error. Defaults to 500

    num_replicas

The number of containers to allocate for this +Webservice. No default; if this parameter is not set, the autoscaler is +enabled by default.

    primary_key

    A primary auth key to use for this Webservice

    secondary_key

    A secondary auth key to use for this Webservice

    tags

    Dictionary of key value tags to give this Webservice

    properties

    Dictionary of key value properties to give this +Webservice. These properties cannot be changed after deployment, however new +key value pairs can be added

    description

    A description to give this Webservice

    gpu_cores

    The number of gpu cores to allocate for this Webservice. +Default is 1

    period_seconds

    How often (in seconds) to perform the liveness probe. +Default to 10 seconds. Minimum value is 1.

    initial_delay_seconds

    Number of seconds after the container has +started before liveness probes are initiated. Defaults to 310

    timeout_seconds

Number of seconds after which the liveness probe +times out. Defaults to 2 seconds. Minimum value is 1.

    success_threshold

    Minimum consecutive successes for the liveness +probe to be considered successful after having failed. Defaults to 1. +Minimum value is 1.

    failure_threshold

    When a Pod starts and the liveness probe fails, +Kubernetes will try failureThreshold times before giving up. Defaults to 3. +Minimum value is 1.

    namespace

    The Kubernetes namespace in which to deploy this +Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen +('-') characters. The first and last characters cannot be hyphens.

    token_auth_enabled

    Whether or not to enable Token auth for this +Webservice. If this is enabled, users can access this Webservice by fetching +access token using their Azure Active Directory credentials. +Defaults to FALSE

    + +

    Value

    + +

    AksServiceDeploymentConfiguration object

    + + + + + + + + diff --git a/docs/reference/attach_aks_compute.html b/docs/reference/attach_aks_compute.html new file mode 100644 index 00000000..6408430f --- /dev/null +++ b/docs/reference/attach_aks_compute.html @@ -0,0 +1,222 @@ + + + + + + + + +Attach an existing AKS cluster to a workspace — attach_aks_compute • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    If you already have an AKS cluster in your Azure subscription, and it is +version 1.12.##, you can attach it to your workspace to use for deployments. +The existing AKS cluster can be in a different Azure region than your +workspace.

    +

    If you want to secure your AKS cluster using an Azure Virtual Network, you +must create the virtual network first. For more information, see +Secure Azure ML experimentation and inference jobs within an Azure Virtual Network

    + +
    + +
    attach_aks_compute(workspace, cluster_name, resource_id = NULL,
    +  resource_group = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    workspace

    The Workspace object to attach the AKS cluster to.

    cluster_name

    A string of the name for the cluster.

    resource_id

    A string of the resource ID for the AKS cluster being +attached.

    resource_group

    A string of the resource group in which the AKS cluster +is located.

    + +

    Value

    + +

    The AksCompute object.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +compute_target <- attach_aks_compute(ws,
    +                                     cluster_name = 'mycluster',
    +                                     resource_id = 'myresourceid',
    +                                     resource_group = 'myresourcegroup')
    +
    + + + + + + + + diff --git a/docs/reference/azureml.html b/docs/reference/azureml.html new file mode 100644 index 00000000..f824d99b --- /dev/null +++ b/docs/reference/azureml.html @@ -0,0 +1,182 @@ + + + + + + + + +azureml module +User can access functions/modules in azureml that are not exposed through the +exported R functions. — azureml • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    azureml module +User can access functions/modules in azureml that are not exposed through the +exported R functions.

    + +
    + +
    azureml
    + +

    Format

    + +

    An object of class python.builtin.module (inherits from python.builtin.object) of length 5.

    + + + + + + + + diff --git a/docs/reference/bandit_policy.html b/docs/reference/bandit_policy.html new file mode 100644 index 00000000..e8b4018d --- /dev/null +++ b/docs/reference/bandit_policy.html @@ -0,0 +1,256 @@ + + + + + + + + +Define a Bandit policy for early termination of HyperDrive runs — bandit_policy • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Bandit is an early termination policy based on slack factor/slack amount +and evaluation interval. The policy early terminates any runs where the +primary metric is not within the specified slack factor/slack amount with +respect to the best performing training run.

    + +
    + +
    bandit_policy(slack_factor = NULL, slack_amount = NULL,
    +  evaluation_interval = 1L, delay_evaluation = 0L)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    slack_factor

    A double of the ratio of the allowed distance from +the best performing run.

    slack_amount

    A double of the absolute distance allowed from the +best performing run.

    evaluation_interval

    An integer of the frequency for applying policy.

    delay_evaluation

    An integer of the number of intervals for which to +delay the first evaluation.

    + +

    Value

    + +

    The BanditPolicy object.

    + +

    Details

    + + + +

    The Bandit policy takes the following configuration parameters:

  • slack_factor or slack_amount: The slack allowed with respect to the best performing training run. slack_factor specifies the allowable slack as a ratio. slack_amount specifies the allowable slack as an absolute amount, instead of a ratio.
  • evaluation_interval: Optional. The frequency for applying the policy. Each time the training script logs the primary metric counts as one interval.
  • delay_evaluation: Optional. The number of intervals to delay the policy evaluation. Use this parameter to avoid premature termination of training runs. If specified, the policy applies every multiple of evaluation_interval that is greater than or equal to delay_evaluation.

    Any run that doesn't fall within the slack factor or slack amount of the +evaluation metric with respect to the best performing run will be +terminated.

    +

    Consider a Bandit policy with slack_factor = 0.2 and +evaluation_interval = 100. Assume that run X is the currently best +performing run with an AUC (performance metric) of 0.8 after 100 intervals. +Further, assume the best AUC reported for a run is Y. This policy compares +the value (Y + Y * 0.2) to 0.8, and if smaller, cancels the run. +If delay_evaluation = 200, then the first time the policy will be applied +is at interval 200.

    +

    Now, consider a Bandit policy with slack_amount = 0.2 and +evaluation_interval = 100. If run 3 is the currently best performing run +with an AUC (performance metric) of 0.8 after 100 intervals, then any run +with an AUC less than 0.6 (0.8 - 0.2) after 100 iterations will be +terminated. Similarly, the delay_evaluation can also be used to delay the +first termination policy evaluation for a specific number of sequences.

    + +

    Examples

    + + + +

    In this example, the early termination policy is applied at every interval +when metrics are reported, starting at evaluation interval 5. Any run whose +best metric is less than +1 / (1 + 0.1) or 91% of the best performing run will be terminated.

    early_termination_policy = bandit_policy(slack_factor = 0.1,
    +                                         evaluation_interval = 1L,
    +                                         delay_evaluation = 5L)
    +
    + + + + + + + + diff --git a/docs/reference/bayesian_parameter_sampling.html b/docs/reference/bayesian_parameter_sampling.html new file mode 100644 index 00000000..d99c5e6f --- /dev/null +++ b/docs/reference/bayesian_parameter_sampling.html @@ -0,0 +1,225 @@ + + + + + + + + +Define Bayesian sampling over a hyperparameter search space — bayesian_parameter_sampling • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Bayesian sampling is based on the Bayesian optimization algorithm and makes +intelligent choices on the hyperparameter values to sample next. It picks +the sample based on how the previous samples performed, such that the new +sample improves the reported primary metric.

    + +
    + +
    bayesian_parameter_sampling(parameter_space)
    + +

    Arguments

    + + + + + + +
    parameter_space

    A named list containing each parameter and its +distribution, e.g. list("parameter" = distribution).

    + +

    Value

    + +

    The BayesianParameterSampling object.

    + +

    Details

    + + + +

    When you use Bayesian sampling, the number of concurrent runs has an impact +on the effectiveness of the tuning process. Typically, a smaller number of +concurrent runs can lead to better sampling convergence, since the smaller +degree of parallelism increases the number of runs that benefit from +previously completed runs.

    +

    Bayesian sampling only supports choice(), uniform(), and quniform() +distributions over the search space.

    +

    Bayesian sampling does not support any early termination policy. When +using Bayesian parameter sampling, early_termination_policy must be +NULL.

    + +

    Examples

    + + +
param_sampling <- bayesian_parameter_sampling(list(
+    learning_rate = uniform(0.05, 0.1),
+    batch_size = choice(c(16, 32, 64, 128))
+))
    +
    + + +

    + + + + + + + + diff --git a/docs/reference/cancel_run.html b/docs/reference/cancel_run.html new file mode 100644 index 00000000..36faf489 --- /dev/null +++ b/docs/reference/cancel_run.html @@ -0,0 +1,182 @@ + + + + + + + + +Cancel run — cancel_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Cancel run

    + +
    + +
    cancel_run(run)
    + +

    Arguments

    + + + + + + +
    run

    run to be cancelled

    + +

    Value

    + +

    TRUE if cancellation was successful, else FALSE

    + + + + + + + + diff --git a/docs/reference/choice.html b/docs/reference/choice.html new file mode 100644 index 00000000..5ea75f18 --- /dev/null +++ b/docs/reference/choice.html @@ -0,0 +1,192 @@ + + + + + + + + +Specify a discrete set of options to sample from — choice • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Specify a discrete set of options to sample the hyperparameters +from.

    + +
    + +
    choice(options)
    + +

    Arguments

    + + + + + + +
    options

    A list of discrete values to choose from, or +one or more comma-separated discrete values to choose from.

    + +

    Value

    + +

    A list of the stochastic expression.

    + +

    + + + + + + + + diff --git a/docs/reference/container_registry.html b/docs/reference/container_registry.html new file mode 100644 index 00000000..ec656ab1 --- /dev/null +++ b/docs/reference/container_registry.html @@ -0,0 +1,207 @@ + + + + + + + + +Specify Azure Container Registry details — container_registry • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Returns a ContainerRegistry object with the details for an +Azure Container Registry (ACR). This is needed when a custom +Docker image used for training or deployment is located in +a private image registry. Provide a ContainerRegistry object +to the image_registry_details parameter of either r_environment() +or estimator().

    + +
    + +
    container_registry(address = NULL, username = NULL, password = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    address

    A string of the DNS name or IP address of the +Azure Container Registry (ACR).

    username

    A string of the username for ACR.

    password

    A string of the password for ACR.

    + +

    Value

    + +

    The ContainerRegistry object.

    + +

    + + + + + + + + diff --git a/docs/reference/create_aks_compute.html b/docs/reference/create_aks_compute.html new file mode 100644 index 00000000..bd92f22f --- /dev/null +++ b/docs/reference/create_aks_compute.html @@ -0,0 +1,289 @@ + + + + + + + + +Create an AksCompute cluster — create_aks_compute • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Provision an Azure Kubernetes Service instance (AksCompute) as a compute +target for web service deployment. AksCompute is recommended for high-scale +production deployments and provides fast response time and autoscaling of +the deployed service. Cluster autoscaling isn't supported through the Azure +ML R SDK. To change the nodes in the AksCompute cluster, use the UI for the +cluster in the Azure portal. Once created, the cluster can be reused for +multiple deployments.

    + +
    + +
    create_aks_compute(workspace, cluster_name, agent_count = NULL,
    +  vm_size = NULL, ssl_cname = NULL, ssl_cert_pem_file = NULL,
    +  ssl_key_pem_file = NULL, location = NULL,
    +  vnet_resourcegroup_name = NULL, vnet_name = NULL,
    +  subnet_name = NULL, service_cidr = NULL, dns_service_ip = NULL,
    +  docker_bridge_cidr = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

    The Workspace object.

    cluster_name

    A string of the name of the cluster.

    agent_count

    An integer of the number of agents (VMs) to host +containers. Defaults to 3.

    vm_size

A string of the size of agent VMs. More details can be found +here. +Note that not all sizes are available in all regions, as detailed in the +aforementioned link. Defaults to 'Standard_D3_v2'.

    ssl_cname

    A string of a CName to use if enabling SSL validation on +the cluster. Must provide all three - CName, cert file, and key file - to +enable SSL validation.

    ssl_cert_pem_file

    A string of a file path to a file containing cert +information for SSL validation. Must provide all three - CName, cert file, +and key file - to enable SSL validation.

    ssl_key_pem_file

    A string of a file path to a file containing key +information for SSL validation. Must provide all three - CName, cert file, +and key file - to enable SSL validation.

    location

    A string of the location to provision the cluster in. If not +specified, defaults to the workspace location. Available regions for this +compute can be found here: +"https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=kubernetes-service".

    vnet_resourcegroup_name

    A string of the name of the resource group +where the virtual network is located.

    vnet_name

    A string of the name of the virtual network.

    subnet_name

    A string of the name of the subnet inside the vnet.

    service_cidr

    A string of a CIDR notation IP range from which to assign +service cluster IPs.

    dns_service_ip

    A string of the container's DNS server IP address.

    docker_bridge_cidr

    A string of a CIDR notation IP for Docker bridge.

    + +

    Value

    + +

    An AksCompute object.

    + +

    Details

    + + + +

    For more information on using an AksCompute resource within a virtual +network, see +Secure Azure ML experimentation and inference jobs within an Azure Virtual Network

    + +

    Examples

    + + + +

    Create an AksCompute cluster using the default configuration (you can also +provide parameters to customize this).

    ws <- load_workspace_from_config()
    +compute_target <- create_aks_compute(ws, cluster_name = 'mycluster')
    +wait_for_provisioning_completion(compute_target)
    +
    + + + + + + + + diff --git a/docs/reference/create_aml_compute.html b/docs/reference/create_aml_compute.html new file mode 100644 index 00000000..b26a167d --- /dev/null +++ b/docs/reference/create_aml_compute.html @@ -0,0 +1,305 @@ + + + + + + + + +Create an AmlCompute cluster — create_aml_compute • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Provision Azure Machine Learning Compute (AmlCompute) as a compute target +for training. AmlCompute is a managed-compute infrastructure that allows the +user to easily create a single or multi-node compute. To create a persistent +AmlCompute resource that can be reused across jobs, make sure to specify the +vm_size and max_nodes parameters. The compute can then be shared with +other users in the workspace and is kept between jobs. If min_nodes = 0, +the compute autoscales down to zero nodes when it isn't used, and scales up +automatically when a job is submitted.

    +

    AmlCompute has default limits, such as the number of cores that can be +allocated. For more information, see +Manage and request quotas for Azure resources.

    + +
    + +
    create_aml_compute(workspace, cluster_name, vm_size,
    +  vm_priority = "dedicated", min_nodes = 0, max_nodes = NULL,
    +  idle_seconds_before_scaledown = NULL, admin_username = NULL,
    +  admin_user_password = NULL, admin_user_ssh_key = NULL,
    +  vnet_resourcegroup_name = NULL, vnet_name = NULL,
    +  subnet_name = NULL, tags = NULL, description = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

    The Workspace object.

    cluster_name

    A string of the name of the cluster.

    vm_size

A string of the size of agent VMs. More details can be found +here. +Note that not all sizes are available in all regions, as detailed in the +aforementioned link. Defaults to 'Standard_NC6'.

    vm_priority

    A string of either 'dedicated' or 'lowpriority' to +use either dedicated or low-priority VMs. Defaults to 'dedicated'.

    min_nodes

    An integer of the minimum number of nodes to use on the +cluster. If not specified, will default to 0.

    max_nodes

    An integer of the maximum number of nodes to use on the +cluster.

    idle_seconds_before_scaledown

    An integer of the node idle time in +seconds before scaling down the cluster. Defaults to 120.

    admin_username

    A string of the name of the administrator user account +that can be used to SSH into nodes.

    admin_user_password

    A string of the password of the administrator user +account.

    admin_user_ssh_key

    A string of the SSH public key of the administrator +user account.

    vnet_resourcegroup_name

    A string of the name of the resource group +where the virtual network is located.

    vnet_name

    A string of the name of the virtual network.

    subnet_name

    A string of the name of the subnet inside the vnet.

    tags

A named list of tags for the cluster, e.g. +list("tag" = "value").

    description

    A string of the description for the cluster.

    + +

    Value

    + +

    The AmlCompute object.

    + +

    Details

    + + + +

    For more information on using an Azure Machine Learning Compute resource +in a virtual network, see +Secure Azure ML experimentation and inference jobs within an Azure Virtual Network.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +compute_target <- create_aml_compute(ws,
    +                                     cluster_name = 'mycluster',
    +                                     vm_size = 'STANDARD_D2_V2',
    +                                     max_nodes = 1)
    +wait_for_provisioning_completion(compute_target, show_output = TRUE)
    +
    + + +

    See also

diff --git a/docs/reference/create_workspace.html b/docs/reference/create_workspace.html
new file mode 100644 index 00000000..e9c0969a

Create a new Azure Machine Learning workspace — create_workspace • azureml

Create a new Azure Machine Learning workspace. Throws an exception if the workspace already exists or any of the workspace requirements are not satisfied. When you create a new workspace, it automatically creates several Azure resources that are used in the workspace:

• Azure Container Registry: Registers Docker containers that you use during training and when you deploy a model. To minimize costs, ACR is lazy-loaded until deployment images are created.

• Azure Storage account: Used as the default datastore for the workspace.

• Azure Application Insights: Stores monitoring information about your models.

• Azure Key Vault: Stores secrets that are used by compute targets and other sensitive information that's needed by the workspace.
    create_workspace(name, subscription_id = NULL, resource_group = NULL,
    +  location = NULL, create_resource_group = TRUE,
    +  friendly_name = NULL, storage_account = NULL, key_vault = NULL,
    +  app_insights = NULL, container_registry = NULL, exist_ok = FALSE,
    +  show_output = TRUE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    name

    A string of the new workspace name. Workspace name has to be +between 2 and 32 characters of letters and numbers.

    subscription_id

    A string of the subscription ID of the containing +subscription for the new workspace. The parameter is required if the user has +access to more than one subscription.

    resource_group

A string of the Azure resource group that contains the workspace. The parameter defaults to a mutation of the workspace name.

    location

    A string of the location of the workspace. The parameter +defaults to the resource group location. The location has to be a supported +region for Azure Machine Learning Services.

    create_resource_group

    If TRUE the resource group will be created +if it doesn't exist.

    friendly_name

    A string of the friendly name for the workspace that +can be displayed in the UI.

    storage_account

A string of an existing storage account in the Azure resource ID format. The storage account will be used by the workspace to save run outputs, code, logs, etc. If NULL, a new storage account will be created.

    key_vault

    A string of an existing key vault in the Azure resource ID +format. The key vault will be used by the workspace to store credentials +added to the workspace by the users. If NULL a new key vault will be +created.

    app_insights

    A string of an existing Application Insights in the Azure +resource ID format. The Application Insights will be used by the workspace to +log webservices events. If NULL a new Application Insights will be created.

    container_registry

    A string of an existing container registry in the +Azure resource ID format. The container registry will be used by the +workspace to pull and push both experimentation and webservices images. If +NULL a new container registry will be created.

    exist_ok

    If TRUE the method will not fail if the workspace already +exists.

    show_output

If TRUE, the method will print out incremental progress of the method.

    + +

    Value

    + +

    The Workspace object.

    + +

    Usage

    + + + +

    The first example requires only minimal specification, and all dependent +resources as well as the resource group will be created automatically.

    ws <- create_workspace(name = 'myworkspace',
    +                       subscription_id = '<azure-subscription-id>',
    +                       resource_group = 'myresourcegroup',
    +                       location = 'eastus2')
    +
    + +

    The following example shows how to reuse existing Azure resources by making +use of all parameters utilizing the Azure resource ID format. The specific +Azure resource IDs can be retrieved through the Azure Portal or SDK. This +assumes that the resource group, storage account, key vault, App Insights +and container registry already exist.

    ws <- create_workspace(
    +       name = 'myworkspace',
    +       subscription_id = '<azure-subscription-id>',
    +       resource_group = 'myresourcegroup',
    +       create_resource_group = FALSE,
    +       location = 'eastus2',
    +       friendly_name = 'My workspace',
    +       storage_account = 'subscriptions/<azure-subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.storage/storageaccounts/mystorageaccount',
    +       key_vault = 'subscriptions/<azure-subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.keyvault/vaults/mykeyvault',
    +       app_insights = 'subscriptions/<azure-subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.insights/components/myappinsights',
    +       container_registry = 'subscriptions/<azure-subscription-id>/resourcegroups/myresourcegroup/providers/microsoft.containerregistry/registries/mycontainerregistry')
    +
diff --git a/docs/reference/delete_compute.html b/docs/reference/delete_compute.html
new file mode 100644 index 00000000..c713f330

Delete a cluster — delete_compute • azureml

    Remove the compute object from its associated workspace and delete the +corresponding cloud-based resource.

    + +
    + +
    delete_compute(cluster)
    + +

    Arguments

    + + + + + + +
    cluster

    The AmlCompute or AksCompute object.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +compute_target <- get_compute(ws, cluster_name = 'mycluster')
    +delete_compute(compute_target)
    +
diff --git a/docs/reference/delete_local_webservice.html b/docs/reference/delete_local_webservice.html
new file mode 100644 index 00000000..120767fe

Delete this LocalWebservice from the local machine. This function call is not asynchronous; it runs until the service is deleted. — delete_local_webservice • azureml

    Delete this LocalWebservice from the local machine. +This function call is not asynchronous; it runs until the service is deleted.

    + +
    + +
    delete_local_webservice(webservice, delete_cache = TRUE,
    +  delete_image = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    webservice

    LocalWebservice object.

    delete_cache

    Delete temporary files cached for the service.

    delete_image

    Delete the service's Docker image.
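
Examples

A minimal sketch, assuming service is a LocalWebservice object from a prior local deployment:

# Delete the local service along with its cached files
delete_local_webservice(service, delete_cache = TRUE)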

diff --git a/docs/reference/delete_model.html b/docs/reference/delete_model.html
new file mode 100644 index 00000000..a1d8e867

Delete this model from its associated workspace. — delete_model • azureml

    Delete this model from its associated workspace.

    + +
    + +
    delete_model(model)
    + +

    Arguments

    + + + + + + +
    model

The model to delete.
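
Examples

A short sketch; the workspace config file and the model name 'mymodel' are assumptions for illustration:

ws <- load_workspace_from_config()
model <- get_model(ws, name = 'mymodel')  # 'mymodel' is a placeholder
delete_model(model)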

diff --git a/docs/reference/delete_secrets.html b/docs/reference/delete_secrets.html
new file mode 100644 index 00000000..dd28f49c

Delete secrets from a keyvault — delete_secrets • azureml

    Delete secrets from the keyvault associated with the workspace for +a specified set of secret names.

    + +
    + +
    delete_secrets(keyvault, secrets)
    + +

    Arguments

    + + + + + + + + + + +
    keyvault

    The Keyvault object.

    secrets

    A vector of secret names.
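
Examples

A short sketch, assuming the named secrets were previously added to the workspace's keyvault (names are placeholders):

ws <- load_workspace_from_config()
keyvault <- get_default_keyvault(ws)
delete_secrets(keyvault, secrets = c('mysecret1', 'mysecret2'))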

diff --git a/docs/reference/delete_webservice.html b/docs/reference/delete_webservice.html
new file mode 100644 index 00000000..22cf6b47

Delete this Webservice from its associated workspace. — delete_webservice • azureml

    Delete this Webservice from its associated workspace.

    + +
    + +
    delete_webservice(webservice)
    + +

    Arguments

    + + + + + + +
    webservice

    The webservice object.
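
Examples

A minimal sketch (the service name is a placeholder):

ws <- load_workspace_from_config()
service <- get_webservice(ws, name = 'myservice')
delete_webservice(service)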

diff --git a/docs/reference/delete_workspace.html b/docs/reference/delete_workspace.html
new file mode 100644 index 00000000..03eac0aa

Delete a workspace — delete_workspace • azureml

    Delete the Azure Machine Learning workspace resource. delete_workspace() +can also delete the workspace's associated resources.

    + +
    + +
    delete_workspace(workspace, delete_dependent_resources = FALSE,
    +  no_wait = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    workspace

    The Workspace object of the workspace to delete.

    delete_dependent_resources

If TRUE, the workspace's associated resources, i.e. ACR, storage account, key vault, and Application Insights, will also be deleted.

    no_wait

If TRUE, do not wait for the workspace deletion to complete.
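
Examples

A sketch that also deletes the workspace's dependent resources; a workspace config file is assumed to exist:

ws <- load_workspace_from_config()
# Remove the workspace and its ACR, storage account, key vault, and App Insights
delete_workspace(ws, delete_dependent_resources = TRUE)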

diff --git a/docs/reference/deploy_model.html b/docs/reference/deploy_model.html
new file mode 100644 index 00000000..aa937ded

Deploy a Webservice from zero or more model objects. — deploy_model • azureml

    Deploy a Webservice from zero or more model objects.

    + +
    + +
    deploy_model(workspace, name, models, inference_config,
    +  deployment_config = NULL, deployment_target = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

The Workspace object to associate the Webservice with.

    name

    The name to give the deployed service. Must be unique to the +workspace, only consist of lowercase letters, numbers, or dashes, start with +a letter, and be between 3 and 32 characters long.

    models

    A list of model objects. Can be an empty list.

    inference_config

    An InferenceConfig object used to determine required +model properties.

    deployment_config

    A WebserviceDeploymentConfiguration used to +configure the webservice. If one is not provided, an empty configuration +object will be used based on the desired target.

    deployment_target

An azureml.core.ComputeTarget to deploy the Webservice to. As Azure Container Instances has no associated azureml.core.ComputeTarget, leave this parameter as NULL to deploy to Azure Container Instances.

    + +

    Value

    + +

    A Webservice object corresponding to the deployed webservice
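
Examples

A hypothetical end-to-end sketch. The model name, entry script, and the inference_config() and aci_webservice_deployment_config() helpers are assumptions for illustration; leaving deployment_target as NULL deploys to Azure Container Instances:

ws <- load_workspace_from_config()
model <- get_model(ws, name = 'mymodel')   # placeholder name
inf_config <- inference_config(entry_script = 'score.R')
aci_config <- aci_webservice_deployment_config(cpu_cores = 1, memory_gb = 1)
service <- deploy_model(ws,
                        name = 'myservice',
                        models = list(model),
                        inference_config = inf_config,
                        deployment_config = aci_config)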

diff --git a/docs/reference/deserialize_to_model.html b/docs/reference/deserialize_to_model.html
new file mode 100644 index 00000000..e4f4f4ab

Convert a json object into a Model object. — deserialize_to_model • azureml

    Convert a json object into a Model object.

    + +
    + +
    deserialize_to_model(workspace, model_payload)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The workspace object the model is registered under

    model_payload

    A json object to convert to a Model object

    + +

    Value

    + +

    The Model representation of the provided json object

diff --git a/docs/reference/deserialize_to_webservice.html b/docs/reference/deserialize_to_webservice.html
new file mode 100644 index 00000000..e9ed3cfa

Convert a json object into a Webservice object. — deserialize_to_webservice • azureml

    Convert a json object into a Webservice object.

    + +
    + +
    deserialize_to_webservice(workspace, webservice_payload)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The workspace object the Webservice is registered under

    webservice_payload

    A json object to convert to a Webservice object

    + +

    Value

    + +

    The Webservice representation of the provided json object

diff --git a/docs/reference/detach_aks_compute.html b/docs/reference/detach_aks_compute.html
new file mode 100644 index 00000000..67b6d7d8

Detach an AksCompute cluster from its associated workspace — detach_aks_compute • azureml

    Detach the AksCompute cluster from its associated workspace. No +underlying cloud resource will be deleted; the association will +just be removed.

    + +
    + +
    detach_aks_compute(cluster)
    + +

    Arguments

    + + + + + + +
    cluster

    The AksCompute object.
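
Examples

A minimal sketch (the cluster name is a placeholder):

ws <- load_workspace_from_config()
cluster <- get_compute(ws, cluster_name = 'myakscluster')
detach_aks_compute(cluster)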

diff --git a/docs/reference/download_file_from_run.html b/docs/reference/download_file_from_run.html
new file mode 100644 index 00000000..d9ad335a

Download an associated file from storage. — download_file_from_run • azureml

    Download an associated file from storage.

    + +
    + +
    download_file_from_run(run, name, output_file_path = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    run

    the run object

    name

    The name of the artifact to be downloaded

    output_file_path

    The local path where to store the artifact

diff --git a/docs/reference/download_files_from_run.html b/docs/reference/download_files_from_run.html
new file mode 100644 index 00000000..75428f01

Download files from a given storage prefix (folder name) or the entire container if prefix is unspecified. — download_files_from_run • azureml

    Download files from a given storage prefix (folder name) or +the entire container if prefix is unspecified.

    + +
    + +
    download_files_from_run(run, prefix = NULL, output_directory = NULL,
    +  output_paths = NULL, batch_size = 100L)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    run

    the run object

    prefix

    the filepath prefix within the container from +which to download all artifacts

    output_directory

    optional directory that all artifact paths use +as a prefix

    output_paths

Optional filepaths in which to store the downloaded artifacts. Should be unique and match the length of the artifact paths.

    batch_size

    number of files to download per batch
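
Examples

A hedged sketch that downloads everything under a run's outputs folder; the run ID and prefix are placeholders:

ws <- load_workspace_from_config()
exp <- experiment(ws, name = 'myexperiment')
run <- get_run(exp, run_id = 'myexperiment_1234567890_abcd1234')
download_files_from_run(run, prefix = 'outputs', output_directory = 'run_outputs')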

diff --git a/docs/reference/download_from_datastore.html b/docs/reference/download_from_datastore.html
new file mode 100644 index 00000000..46832ea0

Download data from a datastore to the local file system — download_from_datastore • azureml

    Download data from the datastore to the local file system.

    + +
    + +
    download_from_datastore(datastore, target_path, prefix = NULL,
    +  overwrite = FALSE, show_progress = TRUE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    datastore

    The AzureBlobDatastore or AzureFileDatastore object.

    target_path

    A string of the local directory to download the file to.

    prefix

A string of the path to the folder in the blob container or file store to download. If NULL, will download everything in the blob container or file share.

    overwrite

    If TRUE, overwrites any existing data at target_path.

    show_progress

If TRUE, show progress of the download in the console.

    + +

    Value

    + +

    An integer of the number of files successfully downloaded.
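
Examples

A hedged sketch; the folder prefix and target path are placeholders:

ws <- load_workspace_from_config()
ds <- get_default_datastore(ws)
# Download only the files under the 'training-data' folder
download_from_datastore(ds,
                        target_path = 'data',
                        prefix = 'training-data',
                        overwrite = TRUE)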

diff --git a/docs/reference/download_model.html b/docs/reference/download_model.html
new file mode 100644 index 00000000..33a5afcc

Download model to target_dir of local file system. — download_model • azureml

    Download model to target_dir of local file system.

    + +
    + +
    download_model(model, target_dir = ".", exist_ok = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    model

    The model to download

    target_dir

Path to the directory where the model will be downloaded. Defaults to ".".

    exist_ok

If TRUE, replace the downloaded directory/files if they already exist. Defaults to FALSE.

    + +

    Value

    + +

A string of the path to the file or folder of the downloaded model.
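
Examples

A short sketch (the model name is a placeholder):

ws <- load_workspace_from_config()
model <- get_model(ws, name = 'mymodel')
model_path <- download_model(model, target_dir = '.', exist_ok = TRUE)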

diff --git a/docs/reference/estimator.html b/docs/reference/estimator.html
new file mode 100644 index 00000000..b42aa939

Create an estimator — estimator • azureml

    An Estimator wraps run configuration information for specifying details +of executing an R script. Running an Estimator experiment +(using submit_experiment()) will return a ScriptRun object and +execute your training script on the specified compute target.

    + +
    + +
    estimator(source_directory, compute_target = NULL, vm_size = NULL,
    +  vm_priority = NULL, entry_script = NULL, script_params = NULL,
    +  cran_packages = NULL, github_packages = NULL,
    +  custom_url_packages = NULL, custom_docker_image = NULL,
    +  image_registry_details = NULL, use_gpu = FALSE,
    +  environment_variables = NULL, shm_size = NULL,
    +  max_run_duration_seconds = NULL, environment = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    source_directory

    A string of the local directory containing +experiment configuration and code files needed for the training job.

    compute_target

    The AmlCompute object for the compute target +where training will happen.

    vm_size

A string of the VM size of the compute target that will be created for the training job. The list of available VM sizes is listed here. Provide this parameter if you want to create AmlCompute as the compute target at run time, instead of providing an existing cluster to the compute_target parameter. If vm_size is specified, a single-node cluster is automatically created for your run and is deleted automatically once the run completes.

    vm_priority

    A string of either 'dedicated' or 'lowpriority' to +specify the VM priority of the compute target that will be created for the +training job. Defaults to 'dedicated'. This takes effect only when the +vm_size parameter is specified.

    entry_script

    A string representing the relative path to the file used +to start training.

    script_params

    A named list of the command-line arguments to pass to +the training script specified in entry_script.

    cran_packages

    A character vector of CRAN packages to be installed.

    github_packages

    A character vector of GitHub packages to be installed.

    custom_url_packages

    A character vector of packages to be installed +from local directory or custom URL.

    custom_docker_image

    A string of the name of the Docker image from +which the image to use for training will be built. If not set, a default +CPU-based image will be used as the base image. To use an image from a +private Docker repository, you will also have to specify the +image_registry_details parameter.

    image_registry_details

    A ContainerRegistry object of the details of +the Docker image registry for the custom Docker image.

    use_gpu

    Indicates whether the environment to run the experiment should +support GPUs. If TRUE, a GPU-based default Docker image will be used in the +environment. If FALSE, a CPU-based image will be used. Default Docker +images (CPU or GPU) will only be used if the custom_docker_image parameter +is not set.

    environment_variables

    A named list of environment variables names +and values. These environment variables are set on the process where the user +script is being executed.

    shm_size

    A string for the size of the Docker container's shared +memory block. For more information, see +Docker run reference. +If not set, a default value of '2g' is used.

    max_run_duration_seconds

    An integer of the maximum allowed time for +the run. Azure ML will attempt to automatically cancel the run if it takes +longer than this value.

    environment

The Environment object that configures the R environment where the experiment is executed. This parameter is mutually exclusive with the other environment-related parameters custom_docker_image, image_registry_details, use_gpu, environment_variables, shm_size, cran_packages, github_packages, and custom_url_packages, and if set will take precedence over those parameters.

    + +

    Value

    + +

    The Estimator object.
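
Examples

A hedged sketch of a typical training submission; the directory, script, cluster, and experiment names are placeholders:

ws <- load_workspace_from_config()
compute_target <- get_compute(ws, cluster_name = 'mycluster')
est <- estimator(source_directory = '.',
                 entry_script = 'train.R',        # placeholder script
                 compute_target = compute_target,
                 cran_packages = c('caret'))
run <- submit_experiment(experiment(ws, name = 'myexperiment'), est)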

    + +

    See also

diff --git a/docs/reference/experiment.html b/docs/reference/experiment.html
new file mode 100644 index 00000000..0fc4d052

Create an Azure Machine Learning experiment — experiment • azureml

    An experiment is a grouping of many runs from a specified script.

    + +
    + +
    experiment(workspace, name)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The Workspace object.

    name

    A string of the experiment name. The name must be between +3-36 characters, start with a letter or number, and can only contain +letters, numbers, underscores, and dashes.

    + +

    Value

    + +

    The Experiment object.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +exp <- experiment(ws, name = 'myexperiment')
    +
    + + +

    See also

diff --git a/docs/reference/generate_docker_file.html b/docs/reference/generate_docker_file.html
new file mode 100644 index 00000000..0cdb7dce

Generate a dockerfile string to build the image for training. — generate_docker_file • azureml

    Generate a dockerfile string to build the image for training.

    + +
    + +
    generate_docker_file(custom_docker_image = NULL, cran_packages = NULL,
    +  github_packages = NULL, custom_url_packages = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    custom_docker_image

The name of the Docker image from which the image to use for training will be built. If not set, a default CPU-based image will be used as the base image.

    cran_packages

A character vector of CRAN packages to be installed.

    github_packages

A character vector of GitHub packages to be installed.

    custom_url_packages

A character vector of packages to be installed from a local directory or custom URL.

diff --git a/docs/reference/generate_new_webservice_key.html b/docs/reference/generate_new_webservice_key.html
new file mode 100644 index 00000000..957766ed

Regenerate one of the Webservice's keys. Must specify either 'Primary' or 'Secondary' key. — generate_new_webservice_key • azureml

    Regenerate one of the Webservice's keys. Must specify either 'Primary' or +'Secondary' key.

    + +
    + +
    generate_new_webservice_key(webservice, key_type)
    + +

    Arguments

    + + + + + + + + + + +
    webservice

    The webservice object.

    key_type

Which key to regenerate. Options are 'Primary' or 'Secondary'.
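
Examples

A short sketch (the service name is a placeholder):

ws <- load_workspace_from_config()
service <- get_webservice(ws, name = 'myservice')
generate_new_webservice_key(service, key_type = 'Primary')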

diff --git a/docs/reference/generate_score_python_wrapper.html b/docs/reference/generate_score_python_wrapper.html
new file mode 100644 index 00000000..f1a62639

Generate _generated_score.py file for the corresponding entry_script file — generate_score_python_wrapper • azureml

    Generate _generated_score.py file for the corresponding entry_script file

    + +
    + +
    generate_score_python_wrapper(entry_script, source_directory)
    + +

    Arguments

    + + + + + + + + + + +
    entry_script

    Path to local file that contains the code to run for +the image.

    source_directory

The path to the folder that contains all files needed to create the image.

diff --git a/docs/reference/get_aks_compute_credentials.html b/docs/reference/get_aks_compute_credentials.html
new file mode 100644 index 00000000..56c53ea5

Get the credentials for an AksCompute cluster — get_aks_compute_credentials • azureml

Retrieve the credentials for an AksCompute cluster.

    + +
    + +
    get_aks_compute_credentials(cluster)
    + +

    Arguments

    + + + + + + +
    cluster

    The AksCompute object.

    + +

    Value

    + +

    A named list of the cluster credentials.

diff --git a/docs/reference/get_best_run_by_primary_metric.html b/docs/reference/get_best_run_by_primary_metric.html
new file mode 100644 index 00000000..b3dd2c1d

Return the best performing run amongst all completed runs — get_best_run_by_primary_metric • azureml

Find and return the best performing run amongst all the completed runs.

    +

    The best performing run is identified solely based on the primary metric +parameter specified in the HyperDriveConfig (primary_metric_name). +The PrimaryMetricGoal governs whether the minimum or maximum of the +primary metric is used. To do a more detailed analysis of all the +run metrics launched by this HyperDrive run, use get_child_run_metrics(). +Only one of the runs is returned from get_best_run_by_primary_metric(), +even if several of the runs launched by this HyperDrive run reached +the same best metric.

    + +
    + +
    get_best_run_by_primary_metric(hyperdrive_run, include_failed = TRUE,
    +  include_canceled = TRUE)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    hyperdrive_run

    The HyperDriveRun object.

    include_failed

    If TRUE, include the failed runs.

    include_canceled

    If TRUE, include the canceled runs.

    + +

    Value

    + +

    The Run object.
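
Examples

A sketch, assuming hyperdrive_run is a completed HyperDriveRun object from a previously submitted experiment:

best_run <- get_best_run_by_primary_metric(hyperdrive_run)
best_metrics <- get_run_metrics(best_run)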

diff --git a/docs/reference/get_child_run_hyperparameters.html b/docs/reference/get_child_run_hyperparameters.html
new file mode 100644 index 00000000..f38d0b99

Get the hyperparameters for all child runs — get_child_run_hyperparameters • azureml

    Return the hyperparameters for all the child runs of the +HyperDrive run.

    + +
    + +
    get_child_run_hyperparameters(hyperdrive_run)
    + +

    Arguments

    + + + + + + +
    hyperdrive_run

    The HyperDriveRun object.

    + +

    Value

    + +

    The named list of hyperparameters where element name +is the run_id, e.g. list("run_id" = hyperparameters).

diff --git a/docs/reference/get_child_run_metrics.html b/docs/reference/get_child_run_metrics.html
new file mode 100644 index 00000000..c1a2b1b7

Get the metrics from all child runs — get_child_run_metrics • azureml

    Return the metrics from all the child runs of the +HyperDrive run.

    + +
    + +
    get_child_run_metrics(hyperdrive_run)
    + +

    Arguments

    + + + + + + +
    hyperdrive_run

    The HyperDriveRun object.

    + +

    Value

    + +

    The named list of metrics where element name is +the run_id, e.g. list("run_id" = metrics).

diff --git a/docs/reference/get_child_runs_sorted_by_primary_metric.html b/docs/reference/get_child_runs_sorted_by_primary_metric.html
new file mode 100644 index 00000000..e5ceac12

Get the child runs sorted in descending order by best primary metric — get_child_runs_sorted_by_primary_metric • azureml

    Return a list of child runs of the HyperDrive run sorted by their best +primary metric. The sorting is done according to the primary metric and +its goal: if it is maximize, then the child runs are returned in descending +order of their best primary metric. If reverse = TRUE, the order is +reversed. Each child in the result has run id, hyperparameters, best primary +metric value, and status.

    +

    Child runs without the primary metric are discarded when +discard_no_metric = TRUE. Otherwise, they are appended to the list behind +other child runs with the primary metric. Note that the reverse option has no +impact on them.

    + +
    + +
    get_child_runs_sorted_by_primary_metric(hyperdrive_run, top = 0L,
    +  reverse = FALSE, discard_no_metric = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    hyperdrive_run

    The HyperDriveRun object.

    top

    An integer of the number of top child runs to be returned. If 0 +(the default value), all child runs will be returned.

    reverse

    If TRUE, the order will be reversed. This sorting only +impacts child runs with the primary metric.

    discard_no_metric

    If FALSE, child runs without the primary metric +will be appended to the list returned.

    + +

    Value

    + +

    The named list of child runs.

diff --git a/docs/reference/get_compute.html b/docs/reference/get_compute.html
new file mode 100644 index 00000000..1c207c2b

Get an existing compute cluster — get_compute • azureml

    Returns an AmlCompute or AksCompute object for an existing compute +resource. If the compute target doesn't exist, the function will return +NULL.

    + +
    + +
    get_compute(workspace, cluster_name)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The Workspace object.

    cluster_name

    A string of the name of the cluster.

    + +

    Value

    + +

    The AmlCompute or AksCompute object.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +compute_target <- get_compute(ws, cluster_name = 'mycluster')
    +
diff --git a/docs/reference/get_current_run.html b/docs/reference/get_current_run.html
new file mode 100644 index 00000000..b16ca947

Gets the context object for a run — get_current_run • azureml

    Gets the context object for a run

    + +
    + +
    get_current_run(allow_offline = TRUE)
    + +

    Arguments

    + + + + + + +
    allow_offline

    Allow the service context to fall back to offline mode +so that the training script can be tested locally without submitting a job +with the SDK.

    + +

    Value

    + +

    The run object.

diff --git a/docs/reference/get_datastore.html b/docs/reference/get_datastore.html
new file mode 100644 index 00000000..213ba889

Get an existing datastore — get_datastore • azureml

    Get the corresponding datastore object for an existing +datastore by name from the given workspace.

    + +
    + +
    get_datastore(workspace, datastore_name)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The Workspace object.

    datastore_name

    A string of the name of the datastore.

    + +

    Value

    + +

    The AzureBlobDatastore or AzureFileDatastore object.

diff --git a/docs/reference/get_default_datastore.html b/docs/reference/get_default_datastore.html
new file mode 100644 index 00000000..0fb0b8a2

Get the default datastore for a workspace — get_default_datastore • azureml

    Returns the default datastore associated with the workspace.

    +

    When you create a workspace, an Azure blob container and Azure file share +are registered to the workspace with the names workspaceblobstore and +workspacefilestore, respectively. They store the connection information +of the blob container and the file share that is provisioned in the storage +account attached to the workspace. The workspaceblobstore is set as the +default datastore, and remains the default datastore unless you set a new +datastore as the default with set_default_datastore().

    + +
    + +
    get_default_datastore(workspace)
    + +

    Arguments

    + + + + + + +
    workspace

    The Workspace object.

    + +

    Value

    + +

    The default Datastore object.

    + +

    Examples

    + + + +

Get the default datastore for the workspace:

    ws <- load_workspace_from_config()
    +ds <- get_default_datastore(ws)
    +
    + +

    If you have not changed the default datastore for the workspace, the +following code will return the same datastore object as the above +example:

    ws <- load_workspace_from_config()
    +ds <- get_datastore(ws, datastore_name = 'workspaceblobstore')
    +
diff --git a/docs/reference/get_default_keyvault.html b/docs/reference/get_default_keyvault.html
new file mode 100644 index 00000000..6e2b0d8a

Get the default keyvault for a workspace — get_default_keyvault • azureml

    Returns a Keyvault object representing the default +Azure Key Vault +associated with the workspace.

    + +
    + +
    get_default_keyvault(workspace)
    + +

    Arguments

    + + + + + + +
    workspace

    The Workspace object.

    + +

    Value

    + +

    The Keyvault object.

    + +

    See also

diff --git a/docs/reference/get_environment.html b/docs/reference/get_environment.html
new file mode 100644 index 00000000..453a00ae

Get an existing environment — get_environment • azureml

    Returns an Environment object for an existing environment in +the workspace.

    + +
    + +
    get_environment(workspace, name, version = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    workspace

    The Workspace object.

    name

    A string of the name of the environment.

    version

    A string of the version of the environment.

    + +

    Value

    + +

    The Environment object.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +env <- get_environment(ws, name = 'myenv', version = '1')
    +
diff --git a/docs/reference/get_model.html b/docs/reference/get_model.html
new file mode 100644 index 00000000..9d039f27

Retrieve the Model object from the cloud. — get_model • azureml

    Retrieve the Model object from the cloud.

    + +
    + +
    get_model(workspace, name = NULL, id = NULL, tags = NULL,
    +  properties = NULL, version = NULL, run_id = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

    The workspace object containing the Model to retrieve

    name

    Will retrieve the latest model with the corresponding name, if +it exists

    id

    Will retrieve the model with the corresponding ID, if it exists

    tags

    Optional, will filter based on the provided list, searching by +either 'key' or '[key, value]'.

    properties

    Optional, will filter based on the provided list, +searching by either 'key' or '[key, value]'.

    version

    When provided along with name, will get the specific version +of the specified named model, if it exists

    run_id

    Optional, will filter based on the provided ID.

    + +

    Value

    + +

    A model object, if one is found in the provided workspace

diff --git a/docs/reference/get_model_package_container_registry.html b/docs/reference/get_model_package_container_registry.html
new file mode 100644 index 00000000..d2ae3f51

Return a ContainerRegistry object for where the image (or base image, for Dockerfile packages) is stored. — get_model_package_container_registry • azureml

    Return a ContainerRegistry object for where the image +(or base image, for Dockerfile packages) is stored.

    + +
    + +
    get_model_package_container_registry(package)
    + +

    Arguments

    + + + + + + +
    package

    Package created with model(s) and dependencies.

    + +

    Value

    + +

    ContainerRegistry object

diff --git a/docs/reference/get_model_package_creation_logs.html b/docs/reference/get_model_package_creation_logs.html
new file mode 100644 index 00000000..5a13f95c

Retrieve the package creation logs. — get_model_package_creation_logs • azureml

    Retrieve the package creation logs.

    + +
    + +
    get_model_package_creation_logs(package, decode = TRUE, offset = 0)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    package

    Package created with model(s) and dependencies.

    decode

    Whether to decode the raw log bytes to a string.

    offset

    Byte offset from which to start reading the logs.

    + +

    Value

    + +

    Package creation logs.

diff --git a/docs/reference/get_run.html b/docs/reference/get_run.html
new file mode 100644 index 00000000..ae3663e7

Gets the Run object from a given run id — get_run • azureml

    Gets the Run object from a given run id

    + +
    + +
    get_run(experiment, run_id)
    + +

    Arguments

    + + + + + + + + + + +
    experiment

    The containing experiment.

    run_id

    The run id for the run.

    + +

    Value

    + +

    The run object.
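
Examples

A sketch; the run ID shown is a placeholder for the ID of a previously submitted run:

ws <- load_workspace_from_config()
exp <- experiment(ws, name = 'myexperiment')
run <- get_run(exp, run_id = 'myexperiment_1234567890_abcd1234')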

diff --git a/docs/reference/get_run_details.html b/docs/reference/get_run_details.html
new file mode 100644 index 00000000..593d751a

Get the definition, status information, current log files and other details of the run. — get_run_details • azureml

    Get the definition, status information, current log files and other details +of the run.

    + +
    + +
    get_run_details(run)
    + +

    Arguments

    + + + + + + +
    run

    the run object

    + +

    Value

    + +

    Return the details for the run

diff --git a/docs/reference/get_run_details_with_logs.html b/docs/reference/get_run_details_with_logs.html
new file mode 100644 index 00000000..5a097d51

Return run status including log file content. — get_run_details_with_logs • azureml

    Return run status including log file content.

    + +
    + +
    get_run_details_with_logs(run)
    + +

    Arguments

    + + + + + + +
    run

    the run object

    + +

    Value

    + +

    Returns the status for the run with log file contents

diff --git a/docs/reference/get_run_file_names.html b/docs/reference/get_run_file_names.html
new file mode 100644 index 00000000..d208da12

List the files that are stored in association with the run. — get_run_file_names • azureml

    List the files that are stored in association with the run.

    + +
    + +
    get_run_file_names(run)
    + +

    Arguments

    + + + + + + +
    run

    the run object

    + +

    Value

    + +

    The list of paths for existing artifacts

diff --git a/docs/reference/get_run_metrics.html b/docs/reference/get_run_metrics.html
new file mode 100644 index 00000000..917944f8

Get the metrics for run — get_run_metrics • azureml

    Get the metrics for run

    + +
    + +
    get_run_metrics(run)
    + +

    Arguments

    + + + + + + +
    run

    Run object

    + +

    Value

    + +

    named list containing metrics associated with the run.

diff --git a/docs/reference/get_runs_in_experiment.html b/docs/reference/get_runs_in_experiment.html
new file mode 100644 index 00000000..6f1c38e6

Return a generator of the runs for an experiment — get_runs_in_experiment • azureml

    Return a generator of the runs for an experiment, in reverse +chronological order.

    + +
    + +
    get_runs_in_experiment(experiment, type = NULL, tags = NULL,
    +  properties = NULL, include_children = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    experiment

    The Experiment object.

    type

    Filter the returned generator of runs by the provided type.

    tags

Filter runs by tags. A named list, e.g. list("tag" = "value").

    properties

Filter runs by properties. A named list, e.g. list("property" = "value").

    include_children

    By default, fetch only top-level runs. +Set to TRUE to list all runs.

    + +

    Value

    + +

    The list of runs matching supplied filters.
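
Examples

A short sketch (the experiment name is a placeholder):

ws <- load_workspace_from_config()
exp <- experiment(ws, name = 'myexperiment')
runs <- get_runs_in_experiment(exp, include_children = TRUE)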

diff --git a/docs/reference/get_secrets.html b/docs/reference/get_secrets.html
new file mode 100644 index 00000000..2d1d0423

Get secrets from a keyvault — get_secrets • azureml

    Returns the secret values from the keyvault associated with the +workspace for a given set of secret names. For runs submitted using +submit_experiment(), you can use get_secrets_from_run() instead, +as that method shortcuts workspace instantiation (since a submitted +run is aware of its workspace).

    + +
    + +
    get_secrets(keyvault, secrets)
    + +

    Arguments

    + + + + + + + + + + +
    keyvault

    The Keyvault object.

    secrets

    A vector of secret names.

    + +

    Value

    + +

    A named list of found and not found secrets, where element +name corresponds to the secret name. If a secret was not found, the +corresponding element will be NULL.
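
Examples

A short sketch, assuming the named secrets were previously added to the workspace's keyvault:

ws <- load_workspace_from_config()
keyvault <- get_default_keyvault(ws)
secret_values <- get_secrets(keyvault, secrets = c('mysecret1', 'mysecret2'))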

diff --git a/docs/reference/get_secrets_from_run.html b/docs/reference/get_secrets_from_run.html
new file mode 100644 index 00000000..8c1f9eba

Get the secret values for a given list of secret names. Returns a named list of found and not found secrets for the list of names provided. — get_secrets_from_run • azureml

Get the secret values for a given list of secret names. Returns a named list of found and not found secrets for the list of names provided.

    + +
    + +
    get_secrets_from_run(run, secrets)
    + +

    Arguments

    + + + + + + + + + + +
    run

    the run object

    secrets

    List of secret names to retrieve the values for

    + +

    Value

    + +

A named list of found and not found secrets.

diff --git a/docs/reference/get_webservice.html b/docs/reference/get_webservice.html
new file mode 100644 index 00000000..9689ef79

Retrieve a cloud representation of a Webservice object associated with the provided workspace. Will return an instance of a child class corresponding to the specific type of the retrieved Webservice object. — get_webservice • azureml

    Retrieve a cloud representation of a Webservice object associated with the +provided workspace. Will return an instance of a child class corresponding to +the specific type of the retrieved Webservice object.

    + +
    + +
    get_webservice(workspace, name)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The workspace object containing the Webservice object to +retrieve

    name

The name of the Webservice object to retrieve.

    + +

    Value

    + +

    The webservice object
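
Examples

A minimal sketch (the service name is a placeholder):

ws <- load_workspace_from_config()
service <- get_webservice(ws, name = 'myservice')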

diff --git a/docs/reference/get_webservice_keys.html b/docs/reference/get_webservice_keys.html
new file mode 100644 index 00000000..81169488

Retrieve auth keys for this Webservice. — get_webservice_keys • azureml

    Retrieve auth keys for this Webservice.

    + +
    + +
    get_webservice_keys(webservice)
    + +

    Arguments

    + + + + + + +
    webservice

    The webservice object.

    + +

    Value

    + +

    The auth keys for this Webservice

diff --git a/docs/reference/get_webservice_logs.html b/docs/reference/get_webservice_logs.html
new file mode 100644 index 00000000..39e0702d

Retrieve logs for the Webservice. — get_webservice_logs • azureml

    Retrieve logs for the Webservice.

    + +
    + +
    get_webservice_logs(webservice, num_lines = 5000L)
    + +

    Arguments

    + + + + + + + + + + +
    webservice

    The webservice object.

    num_lines

    The maximum number of log lines to retrieve.

    + +

    Value

    + +

    The logs for this Webservice
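For example (a minimal sketch; service is an existing webservice object):
logs <- get_webservice_logs(service, num_lines = 1000L)
+cat(logs)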

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/get_webservice_token.html b/docs/reference/get_webservice_token.html new file mode 100644 index 00000000..89a60fb1 --- /dev/null +++ b/docs/reference/get_webservice_token.html @@ -0,0 +1,183 @@ + + + + + + + + +Retrieve auth token for this Webservice, scoped to the current user. — get_webservice_token • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Retrieve auth token for this Webservice, scoped to the current user.

    + +
    + +
    get_webservice_token(webservice)
    + +

    Arguments

    + + + + + + +
    webservice

    The webservice object.

    + +

    Value

    + +

The auth token for this Webservice and the time after which it +should be refreshed.
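A minimal sketch, assuming service is an existing webservice object (token-based auth is typically available for AKS deployments):
token <- get_webservice_token(service)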

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/get_workspace.html b/docs/reference/get_workspace.html new file mode 100644 index 00000000..d461f5db --- /dev/null +++ b/docs/reference/get_workspace.html @@ -0,0 +1,196 @@ + + + + + + + + +Get an existing workspace — get_workspace • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

Returns a Workspace object for an existing Azure Machine Learning +workspace. Throws an exception if the workspace doesn't exist or the +required fields don't lead to a uniquely identifiable workspace.

    + +
    + +
    get_workspace(name, subscription_id = NULL, resource_group = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    name

    A string of the workspace name to get.

    subscription_id

    A string of the subscription ID to use. The parameter +is required if the user has access to more than one subscription.

    resource_group

    A string of the resource group to use. If NULL the +method will search all resource groups in the subscription.

    + +

    Value

    + +

    The Workspace object.
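For example (a minimal sketch; the workspace name, subscription ID, and resource group are placeholders):
ws <- get_workspace("myworkspace",
+                    subscription_id = "<subscription-id>",
+                    resource_group = "myresourcegroup")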

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/get_workspace_details.html b/docs/reference/get_workspace_details.html new file mode 100644 index 00000000..17caec1b --- /dev/null +++ b/docs/reference/get_workspace_details.html @@ -0,0 +1,208 @@ + + + + + + + + +Get the details of a workspace — get_workspace_details • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Returns the details of the workspace.

    + +
    + +
    get_workspace_details(workspace)
    + +

    Arguments

    + + + + + + +
    workspace

    The Workspace object.

    + +

    Value

    + +

    A named list of the workspace details.

    + +

    Details

    + + + +

    The returned list contains the following named elements:

      +
• id: URI pointing to the workspace resource, containing subscription ID, resource group, and workspace name.
• name: Workspace name.
• location: Workspace region.
• type: URI of the format "{providerName}/workspaces".
• workspaceid: Workspace ID.
• description: Workspace description.
• friendlyName: Workspace friendly name.
• creationTime: Time the workspace was created, in ISO8601.
• containerRegistry: Workspace container registry.
• keyVault: Workspace key vault.
• applicationInsights: Workspace App Insights.
• identityPrincipalId: Workspace identity principal ID.
• identityTenantId: Workspace tenant ID.
• identityType: Workspace identity type.
• storageAccount: Workspace storage account.
    + + + +
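For example (a minimal sketch; ws is an existing workspace object):
details <- get_workspace_details(ws)
+details$location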
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/grid_parameter_sampling.html b/docs/reference/grid_parameter_sampling.html new file mode 100644 index 00000000..fd7d8072 --- /dev/null +++ b/docs/reference/grid_parameter_sampling.html @@ -0,0 +1,206 @@ + + + + + + + + +Define grid sampling over a hyperparameter search space — grid_parameter_sampling • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Grid sampling performs a simple grid search over all feasible values in +the defined search space. It can only be used with hyperparameters +specified using choice().

    + +
    + +
    grid_parameter_sampling(parameter_space)
    + +

    Arguments

    + + + + + + +
    parameter_space

    A named list containing each parameter and its +distribution, e.g. list("parameter" = distribution).

    + +

    Value

    + +

    The GridParameterSampling object.

    + +

    Examples

    + + +
param_sampling <- grid_parameter_sampling(list("num_hidden_layers" = choice(c(1, 2, 3)),
+                                               "batch_size" = choice(c(16, 32))))
    +
    + + +

    See also

    + + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/hyperdrive_config.html b/docs/reference/hyperdrive_config.html new file mode 100644 index 00000000..e8fb7a55 --- /dev/null +++ b/docs/reference/hyperdrive_config.html @@ -0,0 +1,288 @@ + + + + + + + + +Create a configuration for a HyperDrive run — hyperdrive_config • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    The HyperDrive configuration includes information about hyperparameter +space sampling, termination policy, primary metric, estimator, and +the compute target to execute the experiment runs on.

    +

    To submit the HyperDrive experiment, pass the HyperDriveConfig object +returned from this method to submit_experiment().

    + +
    + +
    hyperdrive_config(hyperparameter_sampling, primary_metric_name,
    +  primary_metric_goal, max_total_runs, max_concurrent_runs = NULL,
    +  max_duration_minutes = 10080L, policy = NULL, estimator = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    hyperparameter_sampling

    The hyperparameter sampling space. +Can be a RandomParameterSampling, GridParameterSampling, or +BayesianParameterSampling object.

    primary_metric_name

    A string of the name of the primary metric +reported by the experiment runs.

    primary_metric_goal

    The PrimaryMetricGoal object. This +parameter determines if the primary metric is to be minimized or +maximized when evaluating runs.

    max_total_runs

    An integer of the maximum total number of runs +to create. This is the upper bound; there may be fewer runs when the +sample space is smaller than this value. If both max_total_runs and +max_duration_minutes are specified, the hyperparameter tuning experiment +terminates when the first of these two thresholds is reached.

    max_concurrent_runs

    An integer of the maximum number of runs to +execute concurrently. If NULL, all runs are launched in parallel. +The number of concurrent runs is gated on the resources available in the +specified compute target. Hence, you need to ensure that the compute target +has the available resources for the desired concurrency.

    max_duration_minutes

    An integer of the maximum duration of the +HyperDrive run. Once this time is exceeded, any runs still executing are +cancelled. If both max_total_runs and max_duration_minutes are specified, +the hyperparameter tuning experiment terminates when the first of these two +thresholds is reached.

    policy

    The early termination policy to use. Can be either a +BanditPolicy, MedianStoppingPolicy, or TruncationSelectionPolicy +object. If NULL (the default), no early termination policy will be used.

    +

The MedianStoppingPolicy with delay_evaluation = 5 is a good +termination policy to start with. These are conservative settings that can +provide approximately 25%-35% savings with no loss on the primary metric +(based on our evaluation data).

    estimator

    The Estimator object.

    + +

    Value

    + +

    The HyperDriveConfig object.

    + +

    Examples

    + + +
    # Load the workspace
    +ws <- load_workspace_from_config()
    +
    +# Get the compute target
    +compute_target <- get_compute(ws, cluster_name = 'mycluster')
    +
    +# Define the primary metric goal
+goal <- primary_metric_goal("MAXIMIZE")
+
+# Define the hyperparameter search space
+param_sampling <- random_parameter_sampling(list("learning_rate" = loguniform(-6, -1)))
+
+# Define the early termination policy
+early_termination_policy <- median_stopping_policy(evaluation_interval = 1L,
+                                                   delay_evaluation = 5L)
+
+# Create the estimator
+est <- estimator(source_directory = '.',
+                 entry_script = 'train.R',
+                 compute_target = compute_target)
+
+# Create the HyperDrive configuration
+hyperdrive_run_config <- hyperdrive_config(
+                                   hyperparameter_sampling = param_sampling,
+                                   primary_metric_name = 'accuracy',
+                                   primary_metric_goal = goal,
+                                   max_total_runs = 100L,
+                                   max_concurrent_runs = 4L,
+                                   policy = early_termination_policy,
+                                   estimator = est)
+
+# Submit the HyperDrive experiment
+exp <- experiment(ws, name = 'myexperiment')
+run <- submit_experiment(exp, hyperdrive_run_config)
    +
    + + +

    See also

    + + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/index.html b/docs/reference/index.html new file mode 100644 index 00000000..93935590 --- /dev/null +++ b/docs/reference/index.html @@ -0,0 +1,926 @@ + + + + + + + + +Function reference • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +

    Installation

    +

    +
    +

    install_azureml()

    +

    Install azureml sdk package

    +

    Workspaces

    +

    Functions for managing workspace resources. A Workspace is the top-level resource for Azure Machine Learning service. It provides a centralized place to work with all the artifacts you create when you use Azure ML.

    +
    +

    create_workspace()

    +

    Create a new Azure Machine Learning workspace

    +

    get_workspace()

    +

    Get an existing workspace

    +

    load_workspace_from_config()

    +

    Load workspace configuration details from a config file

    +

    write_workspace_config()

    +

    Write out the workspace configuration details to a config file

    +

    get_default_datastore()

    +

    Get the default datastore for a workspace

    +

    set_default_datastore()

    +

    Set the default datastore for a workspace

    +

    delete_workspace()

    +

    Delete a workspace

    +

    list_workspaces()

    +

    List all workspaces that the user has access to in a subscription ID

    +

    get_workspace_details()

    +

    Get the details of a workspace

    +

    get_default_keyvault()

    +

    Get the default keyvault for a workspace

    +

    set_secrets()

    +

    Add secrets to a keyvault

    +

    get_secrets()

    +

    Get secrets from a keyvault

    +

    delete_secrets()

    +

    Delete secrets from a keyvault

    +

    list_secrets()

    +

    List the secrets in a keyvault

    +

    Compute targets

    +

    Functions for managing compute resources. A Compute Target is a designated compute resource where you run your scripts or host your service deployments. Compute targets make it easy to change your compute environment without changing your code. Supported compute target types in the R SDK include AmlCompute and AksCompute.

    +
    +

    create_aml_compute()

    +

    Create an AmlCompute cluster

    +

    list_nodes_in_aml_compute()

    +

Get the details (e.g., IP address, port) of all the compute nodes in the +compute target

    +

    update_aml_compute()

    +

    Update scale settings for an AmlCompute cluster

    +

    create_aks_compute()

    +

    Create an AksCompute cluster

    +

    get_aks_compute_credentials()

    +

    Get the credentials for an AksCompute cluster

    +

    attach_aks_compute()

    +

    Attach an existing AKS cluster to a workspace

    +

    detach_aks_compute()

    +

    Detach an AksCompute cluster from its associated workspace

    +

    get_compute()

    +

    Get an existing compute cluster

    +

    wait_for_provisioning_completion()

    +

    Wait for a cluster to finish provisioning

    +

    list_supported_vm_sizes()

    +

    List the supported VM sizes in a region

    +

    delete_compute()

    +

    Delete a cluster

    +

    Working with data

    +

    Functions for managing and accessing data for your machine learning workflows. A Datastore is attached to a workspace and is used to store connection information to an Azure storage service. Azure storage services that can be registered as datastores through the R SDK include Azure Blob Container and Azure File Share.

    +
    +

    upload_files_to_datastore()

    +

    Upload files to the Azure storage a datastore points to

    +

    upload_to_datastore()

    +

    Upload a local directory to the Azure storage a datastore points to

    +

    download_from_datastore()

    +

    Download data from a datastore to the local file system

    +

    get_datastore()

    +

    Get an existing datastore

    +

    register_azure_blob_container_datastore()

    +

    Register an Azure blob container as a datastore

    +

    register_azure_file_share_datastore()

    +

    Register an Azure file share as a datastore

    +

    unregister_datastore()

    +

    Unregister a datastore from its associated workspace

    +

    Environments

    +

    Functions for managing environments. An Azure Machine Learning Environment allows you to create, manage, and reuse the software dependencies required for training and deployment. Environments specify the R packages, environment variables, and software settings around your training and scoring scripts for your containerized training runs and deployments. They are managed and versioned entities within your Azure ML workspace that enable reproducible, auditable, and portable machine learning workflows across different compute targets. For more details, see r_environment().

    +
    +

    r_environment()

    +

    Create an environment

    +

    register_environment()

    +

    Register an environment in the workspace

    +

    get_environment()

    +

    Get an existing environment

    +

    container_registry()

    +

    Specify Azure Container Registry details

    +

    Training & experimentation

    +

    Functions for managing experiments and runs. An Experiment is a grouping of the collection of runs from a specified script. A Run represents a single trial of an experiment. A run is the object used to monitor the asynchronous execution of a trial, log metrics and store output of the trial, and to analyze results and access artifacts generated by the trial. The following run types are supported - ScriptRun (for Estimator experiments) and HyperDriveRun (for HyperDrive experiments). For functions that are specific only to HyperDriveRuns, see the Hyperparameter tuning reference sections. An Estimator wraps run configuration information for specifying details of executing an R script. Running an Estimator experiment (using submit_experiment()) will return a ScriptRun object and execute your training script on the specified compute target.

    +
    +

    experiment()

    +

    Create an Azure Machine Learning experiment

    +

    submit_experiment()

    +

    Submit an experiment and return the active created run

    +

    get_runs_in_experiment()

    +

    Return a generator of the runs for an experiment

    +

    estimator()

    +

    Create an estimator

    +

    wait_for_run_completion()

    +

    Wait for the completion of this run

    +

    get_current_run()

    +

    Gets the context object for a run

    +

    log_metric_to_run()

    +

    Log metric to run

    +

    get_run_metrics()

    +

    Get the metrics for run

    +

    cancel_run()

    +

    Cancel run

    +

    download_file_from_run()

    +

    Download an associated file from storage.

    +

    download_files_from_run()

    +

    Download files from a given storage prefix (folder name) or +the entire container if prefix is unspecified.

    +

    get_run()

    +

    Gets the Run object from a given run id

    +

    get_run_details()

    +

    Get the definition, status information, current log files and other details +of the run.

    +

    get_run_details_with_logs()

    +

    Return run status including log file content.

    +

    get_run_file_names()

    +

    List the files that are stored in association with the run.

    +

    get_secrets_from_run()

    +

    Get the secret values for a given list of secret names. +Get a dictionary of found and not found secrets for the list of names +provided.

    +

    log_accuracy_table_to_run()

    +

Log an accuracy table to the artifact store.

    +

    log_confusion_matrix_to_run()

    +

    Log a confusion matrix to the artifact store.

    +

    log_image_to_run()

    +

    Log an image metric to the run record.

    +

    log_list_to_run()

    +

    Log a list metric value to the run with the given name.

    +

    log_predictions_to_run()

    +

Log predictions to the artifact store.

    +

    log_residuals_to_run()

    +

Log residuals to the artifact store.

    +

    log_row_to_run()

    +

    Log a row metric to the run with the given name.

    +

    log_table_to_run()

    +

    Log a table metric to the run with the given name.

    +

    Hyperparameter tuning

    +

    Functions for configuring and managing hyperparameter tuning (HyperDrive) experiments. Azure ML’s HyperDrive functionality enables you to automate hyperparameter tuning of your machine learning models. For example, you can define the parameter search space as discrete or continuous, and a sampling method over the search space as random, grid, or Bayesian. Also, you can specify a primary metric to optimize in the hyperparameter tuning experiment, and whether to minimize or maximize that metric. You can also define early termination policies in which poorly performing experiment runs are canceled and new ones started.

    +
    +

    hyperdrive_config()

    +

    Create a configuration for a HyperDrive run

    +

    random_parameter_sampling()

    +

    Define random sampling over a hyperparameter search space

    +

    grid_parameter_sampling()

    +

    Define grid sampling over a hyperparameter search space

    +

    bayesian_parameter_sampling()

    +

    Define Bayesian sampling over a hyperparameter search space

    +

    choice()

    +

    Specify a discrete set of options to sample from

    +

    randint()

    +

    Specify a set of random integers in the range [0, upper)

    +

    uniform()

    +

    Specify a uniform distribution of options to sample from

    +

    quniform()

    +

    Specify a uniform distribution of the form +round(uniform(min_value, max_value) / q) * q

    +

    loguniform()

    +

    Specify a log uniform distribution

    +

    qloguniform()

    +

Specify a uniform distribution of the form +round(exp(uniform(min_value, max_value)) / q) * q

    +

    normal()

    +

    Specify a real value that is normally-distributed with mean mu and standard +deviation sigma

    +

    qnormal()

    +

    Specify a normal distribution of the form round(normal(mu, sigma) / q) * q

    +

    lognormal()

    +

    Specify a normal distribution of the form exp(normal(mu, sigma))

    +

    qlognormal()

    +

    Specify a normal distribution of the form +round(exp(normal(mu, sigma)) / q) * q

    +

    primary_metric_goal()

    +

    Define supported metric goals for hyperparameter tuning

    +

    bandit_policy()

    +

    Define a Bandit policy for early termination of HyperDrive runs

    +

    median_stopping_policy()

    +

    Define a median stopping policy for early termination of HyperDrive runs

    +

    truncation_selection_policy()

    +

    Define a truncation selection policy for early termination of HyperDrive runs

    +

    get_best_run_by_primary_metric()

    +

    Return the best performing run amongst all completed runs

    +

    get_child_runs_sorted_by_primary_metric()

    +

    Get the child runs sorted in descending order by +best primary metric

    +

    get_child_run_hyperparameters()

    +

    Get the hyperparameters for all child runs

    +

    get_child_run_metrics()

    +

    Get the metrics from all child runs

    +

    Model management & deployment

    +

    Functions for model management and deployment. Registering a model allows you to store and version your trained model in a workspace. A registered Model can then be deployed as a Webservice using Azure ML. If you would like to access all the assets needed to host a model as a web service without actually deploying the model, you can do so by packaging the model as a ModelPackage. You can deploy your model as a LocalWebservice (locally), AciWebservice (on Azure Container Instances), or AksWebservice (on Azure Kubernetes Service).

    +
    +

    get_model()

    +

    Retrieve the Model object from the cloud.

    +

    register_model()

    +

    Register a model with the provided workspace.

    +

    download_model()

    +

Download a model to a target directory on the local file system.

    +

    deploy_model()

    +

    Deploy a Webservice from zero or more model objects.

    +

    package_model()

    +

    Create a model package in the form of a Docker image or Dockerfile build +context

    +

    delete_model()

    +

    Delete this model from its associated workspace.

    +

    get_model_package_container_registry()

    +

    Return a ContainerRegistry object for where the image +(or base image, for Dockerfile packages) is stored.

    +

    get_model_package_creation_logs()

    +

    Retrieve the package creation logs.

    +

    pull_model_package_image()

    +

    Pull the package output to the local machine. +This can only be used with a Docker image package.

    +

    save_model_package_files()

    +

    Save the package output to a local directory. +This can only be used with a Dockerfile package.

    +

    wait_for_model_package_creation()

    +

    Wait for the package to finish creating.

    +

    inference_config()

    +

    Creates the Model deployment config specific to model deployments.

    +

    get_webservice()

    +

    Retrieve a cloud representation of a Webservice object associated with the +provided workspace. Will return an instance of a child class corresponding to +the specific type of the retrieved Webservice object.

    +

    wait_for_deployment()

    +

    Automatically poll on the running Webservice deployment.

    +

    get_webservice_logs()

    +

    Retrieve logs for the Webservice.

    +

    get_webservice_keys()

    +

    Retrieve auth keys for this Webservice.

    +

    generate_new_webservice_key()

    +

    Regenerate one of the Webservice's keys. Must specify either 'Primary' or +'Secondary' key.

    +

    get_webservice_token()

    +

    Retrieve auth token for this Webservice, scoped to the current user.

    +

    invoke_webservice()

    +

    Call this Webservice with the provided input.

    +

    delete_webservice()

    +

    Delete this Webservice from its associated workspace.

    +

    aci_webservice_deployment_config()

    +

    Create a configuration object for deploying an ACI Webservice.

    +

    update_aci_webservice()

    +

Update the Webservice with the provided properties. +Values left as NULL will remain unchanged in this Webservice.

    +

    aks_webservice_deployment_config()

    +

    Create a configuration object for deploying to an AKS compute target.

    +

    update_aks_webservice()

    +

Update the Webservice with the provided properties. +Values left as NULL will remain unchanged in this Webservice.

    +

    local_webservice_deployment_config()

    +

    Create a configuration object for deploying a local Webservice.

    +

    update_local_webservice()

    +

Update the LocalWebservice with the provided properties. +Values left as NULL will remain unchanged in this LocalWebservice.

    +

    delete_local_webservice()

    +

    Delete this LocalWebservice from the local machine. +This function call is not asynchronous; it runs until the service is deleted.

    +

    reload_local_webservice_assets()

    +

Reload the LocalWebservice's execution script and dependencies. +This restarts the service's container with copies of updated assets, +including the execution script and local dependencies, but it does not +rebuild the underlying image. Accordingly, changes to Conda/pip dependencies +or custom Docker steps will not be reflected in the reloaded LocalWebservice. +To handle those changes, call update_local_webservice() instead.

    +
    + + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/inference_config.html b/docs/reference/inference_config.html new file mode 100644 index 00000000..918ea444 --- /dev/null +++ b/docs/reference/inference_config.html @@ -0,0 +1,201 @@ + + + + + + + + +Creates the Model deployment config specific to model deployments. — inference_config • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Creates the Model deployment config specific to model deployments.

    + +
    + +
    inference_config(entry_script, source_directory = NULL,
    +  description = NULL, environment = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    entry_script

    Path to local file that contains the code to run for the +image.

    source_directory

Paths to folders that contain all the files needed to create +the image.

    description

    A description to give this image.

    environment

    An environment object to use for the deployment. +Doesn't have to be registered. A user should provide either this, or the +other parameters, not both. The individual parameters will NOT serve +as an override for the environment object. Exceptions include +`entry_script`, `source_directory` and `description`.

    + +

    Value

    + +

    An InferenceConfig object
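For example (a minimal sketch; the environment name and entry script are illustrative):
env <- r_environment(name = "deploy-env")
+inf_config <- inference_config(entry_script = "score.R",
+                               source_directory = ".",
+                               environment = env)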

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/install_azureml.html b/docs/reference/install_azureml.html new file mode 100644 index 00000000..5a6f3413 --- /dev/null +++ b/docs/reference/install_azureml.html @@ -0,0 +1,185 @@ + + + + + + + + +Install azureml sdk package — install_azureml • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Install azureml sdk package

    + +
    + +
    install_azureml(version = NULL, envname = "r-azureml",
    +  conda_python_version = "3.6")
    + +

    Arguments

    + + + + + + + + + + + + + + +
    version

The azureml SDK package version to install.

    envname

The name of the conda environment to create.

    conda_python_version

The version of Python to use in the conda environment.

    + + +
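For example, with the defaults (a minimal sketch; this installs the SDK into a conda environment named "r-azureml"):
install_azureml()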
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/invoke_webservice.html b/docs/reference/invoke_webservice.html new file mode 100644 index 00000000..59b27f9a --- /dev/null +++ b/docs/reference/invoke_webservice.html @@ -0,0 +1,188 @@ + + + + + + + + +Call this Webservice with the provided input. — invoke_webservice • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Call this Webservice with the provided input.

    + +
    + +
    invoke_webservice(webservice, input_data)
    + +

    Arguments

    + + + + + + + + + + +
    webservice

    The webservice object.

    input_data

    The input data to call the Webservice with. This is the +data your machine learning model expects as an input to run predictions.

    + +

    Value

    + +

    The result of calling the Webservice. This will return predictions +run from your machine learning model.
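A minimal sketch, assuming a deployed service whose scoring script expects a JSON payload (the input schema is illustrative):
library(jsonlite)
+input_data <- toJSON(data.frame(sepal_length = 5.1, sepal_width = 3.5))
+result <- invoke_webservice(service, input_data)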

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/list_nodes_in_aml_compute.html b/docs/reference/list_nodes_in_aml_compute.html new file mode 100644 index 00000000..f62f609d --- /dev/null +++ b/docs/reference/list_nodes_in_aml_compute.html @@ -0,0 +1,187 @@ + + + + + + + + +Get the details (e.g IP address, port etc) of all the compute nodes in the +compute target — list_nodes_in_aml_compute • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

Get the details (e.g., IP address, port) of all the compute nodes in the +compute target

    + +
    + +
    list_nodes_in_aml_compute(cluster)
    + +

    Arguments

    + + + + + + +
    cluster

The cluster object.

    + +

    Value

    + +

A data frame of the details of all the compute nodes in the cluster.
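For example (a minimal sketch; ws and the cluster name are illustrative):
compute_target <- get_compute(ws, cluster_name = "mycluster")
+nodes <- list_nodes_in_aml_compute(compute_target)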

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/list_secrets.html b/docs/reference/list_secrets.html new file mode 100644 index 00000000..81ac7424 --- /dev/null +++ b/docs/reference/list_secrets.html @@ -0,0 +1,184 @@ + + + + + + + + +List the secrets in a keyvault — list_secrets • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Returns the list of secret names for all the secrets in the keyvault +associated with the workspace.

    + +
    + +
    list_secrets(keyvault)
    + +

    Arguments

    + + + + + + +
    keyvault

    The Keyvault object.

    + +

    Value

    + +

    A list of secret names.
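For example (a minimal sketch; ws is an existing workspace object):
keyvault <- get_default_keyvault(ws)
+list_secrets(keyvault)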

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/list_supported_vm_sizes.html b/docs/reference/list_supported_vm_sizes.html new file mode 100644 index 00000000..3f3c1d77 --- /dev/null +++ b/docs/reference/list_supported_vm_sizes.html @@ -0,0 +1,188 @@ + + + + + + + + +List the supported VM sizes in a region — list_supported_vm_sizes • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    List the supported VM sizes in a region

    + +
    + +
    list_supported_vm_sizes(workspace, location = NULL)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The Workspace object.

    location

    A string of the location of the cluster. If not specified, +will default to the workspace location.

    + +

    Value

    + +

A data frame of the supported VM sizes in the region, including VM name, +vCPUs, and RAM.
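For example (a minimal sketch; ws is an existing workspace object, and the location defaults to the workspace region):
vm_sizes <- list_supported_vm_sizes(ws)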

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/list_workspaces.html b/docs/reference/list_workspaces.html new file mode 100644 index 00000000..2a908916 --- /dev/null +++ b/docs/reference/list_workspaces.html @@ -0,0 +1,194 @@ + + + + + + + + +List all workspaces that the user has access to in a subscription ID — list_workspaces • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    List all workspaces that the user has access to in the specified +subscription_id parameter. The list of workspaces can be filtered +based on the resource group.

    + +
    + +
    list_workspaces(subscription_id, resource_group = NULL)
    + +

    Arguments

    + + + + + + + + + + +
    subscription_id

    A string of the specified subscription ID to +list the workspaces in.

    resource_group

A string of the specified resource group to list +the workspaces in. If NULL, the method will list all the workspaces within +the specified subscription.

    + +

    Value

    + +

    A named list of Workspace objects where element name corresponds +to the workspace name.
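For example (a minimal sketch; the subscription ID is a placeholder):
workspaces <- list_workspaces("<subscription-id>")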

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/load_workspace_from_config.html b/docs/reference/load_workspace_from_config.html new file mode 100644 index 00000000..b0215d7a --- /dev/null +++ b/docs/reference/load_workspace_from_config.html @@ -0,0 +1,194 @@ + + + + + + + + +Load workspace configuration details from a config file — load_workspace_from_config • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Returns a Workspace object for an existing Azure Machine Learning +workspace by reading the workspace configuration from a file. The method +provides a simple way of reusing the same workspace across multiple files or +projects. Users can save the workspace ARM properties using +write_workspace_config(), and use this method to load the same workspace +in different files or projects without retyping the workspace ARM properties.

    + +
    + +
    load_workspace_from_config(path = NULL)
    + +

    Arguments

    + + + + + + +
    path

    A string of the path to the config file or starting directory +for search. The parameter defaults to starting the search in the current +directory.

    + +

    Value

    + +

    The Workspace object.
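For example, after saving the workspace ARM properties with write_workspace_config() (a minimal sketch; the search starts in the current directory):
ws <- load_workspace_from_config()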

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/local_webservice_deployment_config.html b/docs/reference/local_webservice_deployment_config.html new file mode 100644 index 00000000..b767af83 --- /dev/null +++ b/docs/reference/local_webservice_deployment_config.html @@ -0,0 +1,183 @@ + + + + + + + + +Create a configuration object for deploying a local Webservice. — local_webservice_deployment_config • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Create a configuration object for deploying a local Webservice.

    + +
    + +
    local_webservice_deployment_config(port = NULL)
    + +

    Arguments

    + + + + + + +
    port

    The local port on which to expose the service's HTTP endpoint.

    + +

    Value

    + +

    LocalWebserviceDeploymentConfiguration object to use when deploying +a Webservice object.
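For example (a minimal sketch; the port is illustrative):
deployment_config <- local_webservice_deployment_config(port = 8890L)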

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_accuracy_table_to_run.html b/docs/reference/log_accuracy_table_to_run.html new file mode 100644 index 00000000..d1d6f508 --- /dev/null +++ b/docs/reference/log_accuracy_table_to_run.html @@ -0,0 +1,189 @@ + + + + + + + + +Log a accuracy table to the artifact store. — log_accuracy_table_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

Log an accuracy table to the artifact store.

    + +
    + +
    log_accuracy_table_to_run(name, value, description = "", run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

    The name of the accuracy table

    value

A JSON object containing name, version, and data properties.

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_confusion_matrix_to_run.html b/docs/reference/log_confusion_matrix_to_run.html new file mode 100644 index 00000000..d2659117 --- /dev/null +++ b/docs/reference/log_confusion_matrix_to_run.html @@ -0,0 +1,189 @@ + + + + + + + + +Log a confusion matrix to the artifact store. — log_confusion_matrix_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Log a confusion matrix to the artifact store.

    + +
    + +
    log_confusion_matrix_to_run(name, value, description = "", run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

    The name of the confusion matrix

    value

A JSON object containing name, version, and data properties.

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_image_to_run.html b/docs/reference/log_image_to_run.html new file mode 100644 index 00000000..481e1390 --- /dev/null +++ b/docs/reference/log_image_to_run.html @@ -0,0 +1,194 @@ + + + + + + + + +Log an image metric to the run record. — log_image_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Log an image metric to the run record.

    + +
    + +
    log_image_to_run(name, path = NULL, plot = NULL, description = "",
    +  run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    name

The name of the metric.

    path

    The path or stream of the image

    plot

    The plot to log as an image

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_list_to_run.html b/docs/reference/log_list_to_run.html new file mode 100644 index 00000000..619fc804 --- /dev/null +++ b/docs/reference/log_list_to_run.html @@ -0,0 +1,189 @@ + + + + + + + + +Log a list metric value to the run with the given name. — log_list_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Log a list metric value to the run with the given name.

    + +
    + +
    log_list_to_run(name, value, description = "", run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

The name of the metric.

    value

    The value of the metric

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_metric_to_run.html b/docs/reference/log_metric_to_run.html new file mode 100644 index 00000000..767525d1 --- /dev/null +++ b/docs/reference/log_metric_to_run.html @@ -0,0 +1,185 @@ + + + + + + + + +Log metric to run — log_metric_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Log metric to run

    + +
    + +
    log_metric_to_run(name, value, run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    name

The name of the metric.

    value

The value of the metric.

    run

    Run object. If not specified, will default to current run from +service context.
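For example, inside a training script, where run defaults to the current run (the metric name and value are illustrative):
log_metric_to_run("accuracy", 0.95)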

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_predictions_to_run.html b/docs/reference/log_predictions_to_run.html new file mode 100644 index 00000000..474cff22 --- /dev/null +++ b/docs/reference/log_predictions_to_run.html @@ -0,0 +1,189 @@ + + + + + + + + +Log a predictions to the artifact store. — log_predictions_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

Log predictions to the artifact store.

    + +
    + +
    log_predictions_to_run(name, value, description = "", run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

    The name of the predictions

    value

A JSON object containing name, version, and data properties.

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_residuals_to_run.html b/docs/reference/log_residuals_to_run.html new file mode 100644 index 00000000..e4adb1f6 --- /dev/null +++ b/docs/reference/log_residuals_to_run.html @@ -0,0 +1,189 @@ + + + + + + + + +Log a residuals to the artifact store. — log_residuals_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

Log residuals to the artifact store.

    + +
    + +
    log_residuals_to_run(name, value, description = "", run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

The name of the residuals.

    value

A JSON object containing name, version, and data properties.

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_row_to_run.html b/docs/reference/log_row_to_run.html new file mode 100644 index 00000000..2714bbd4 --- /dev/null +++ b/docs/reference/log_row_to_run.html @@ -0,0 +1,189 @@ + + + + + + + + +Log a row metric to the run with the given name. — log_row_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Log a row metric to the run with the given name.

    + +
    + +
    log_row_to_run(name, description = "", run = NULL, ...)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

The name of the metric.

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    ...

    Each named parameter generates a column with the value specified.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/log_table_to_run.html b/docs/reference/log_table_to_run.html new file mode 100644 index 00000000..b9547f52 --- /dev/null +++ b/docs/reference/log_table_to_run.html @@ -0,0 +1,190 @@ + + + + + + + + +Log a table metric to the run with the given name. — log_table_to_run • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Log a table metric to the run with the given name.

    + +
    + +
    log_table_to_run(name, value, description = "", run = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    name

The name of the metric.

    value

    The table value of the metric (dictionary where keys are +columns to be posted to the service)

    description

    An optional metric description

    run

    Run object. If not specified, will default to current run from +service context.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/lognormal.html b/docs/reference/lognormal.html new file mode 100644 index 00000000..ffd64f6b --- /dev/null +++ b/docs/reference/lognormal.html @@ -0,0 +1,197 @@ + + + + + + + + +Specify a normal distribution of the form <code>exp(normal(mu, sigma))</code> — lognormal • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Specify a normal distribution of the form exp(normal(mu, sigma)).

    +

    The logarithm of the return value is normally distributed. When optimizing, +this variable is constrained to be positive.

    + +
    + +
    lognormal(mu, sigma)
    + +

    Arguments

    + + + + + + + + + + +
    mu

    A double of the mean of the normal distribution.

    sigma

    A double of the standard deviation of the normal distribution.

    + +

    Value

    + +

    A list of the stochastic expression.

    + +

    See also

    + + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/loguniform.html b/docs/reference/loguniform.html new file mode 100644 index 00000000..aeb0f13e --- /dev/null +++ b/docs/reference/loguniform.html @@ -0,0 +1,203 @@ + + + + + + + + +Specify a log uniform distribution — loguniform • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Specify a log uniform distribution.

    +

    A value is drawn according to exp(uniform(min_value, max_value)) so that +the logarithm of the return value is uniformly distributed. When optimizing, +this variable is constrained to the interval +[exp(min_value), exp(max_value)].

    + +
    + +
    loguniform(min_value, max_value)
    + +

    Arguments

    + + + + + + + + + + +
    min_value

    A double where the minimum value in the range will be +exp(min_value) (inclusive).

    max_value

A double where the maximum value in the range will be +exp(max_value) (inclusive).

    + +

    Value

    + +

    A list of the stochastic expression.
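For example (a minimal sketch; the parameter name and range are illustrative):
param_sampling <- random_parameter_sampling(list("learning_rate" = loguniform(-6, -1)))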

    + +

    See also

    + + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/median_stopping_policy.html b/docs/reference/median_stopping_policy.html new file mode 100644 index 00000000..c6319e6b --- /dev/null +++ b/docs/reference/median_stopping_policy.html @@ -0,0 +1,236 @@ + + + + + + + + +Define a median stopping policy for early termination of HyperDrive runs — median_stopping_policy • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Median stopping is an early termination policy based on running averages of +primary metrics reported by the runs. This policy computes running averages +across all training runs and terminates runs whose performance is worse than +the median of the running averages. Specifically, a run will be canceled at +interval N if its best primary metric reported up to interval N is worse than +the median of the running averages for intervals 1:N across all runs.

    + +
    + +
    median_stopping_policy(evaluation_interval = 1L, delay_evaluation = 0L)
    + +

    Arguments

    + + + + + + + + + + +
    evaluation_interval

    An integer of the frequency for applying policy.

    delay_evaluation

    An integer of the number of intervals for which to +delay the first evaluation.

    + +

    Value

    + +

    The MedianStoppingPolicy object.

    + +

    Details

    + + + +

    The median stopping policy takes the following optional configuration +parameters:

      +
• evaluation_interval: Optional. The frequency for applying the policy. Each time the training script logs the primary metric counts as one interval.
• delay_evaluation: Optional. The number of intervals to delay the policy evaluation. Use this parameter to avoid premature termination of training runs. If specified, the policy applies every multiple of evaluation_interval that is greater than or equal to delay_evaluation.
    + +

    This policy is inspired from the research publication +Google Vizier: A Service for Black-Box Optimization.

    +

If you are looking for a conservative policy that provides savings without +terminating promising jobs, you can use a MedianStoppingPolicy with +evaluation_interval = 1 and delay_evaluation = 5. These are conservative +settings that can provide approximately 25%-35% savings with no loss on +the primary metric (based on our evaluation data).

    + +

    Examples

    + + + +

    In this example, the early termination policy is applied at every +interval starting at evaluation interval 5. A run will be terminated at +interval 5 if its best primary metric is worse than the median of the +running averages over intervals 1:5 across all training runs.

early_termination_policy <- median_stopping_policy(evaluation_interval = 1L,
+                                                   delay_evaluation = 5L)
    +
    + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/normal.html b/docs/reference/normal.html new file mode 100644 index 00000000..584ced21 --- /dev/null +++ b/docs/reference/normal.html @@ -0,0 +1,200 @@ + + + + + + + + +Specify a real value that is normally-distributed with mean <code>mu</code> and standard +deviation <code>sigma</code> — normal • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Specify a real value that is normally-distributed with mean mu and +standard deviation sigma.

    +

    When optimizing, this is an unconstrained variable.

    + +
    + +
    normal(mu, sigma)
    + +

    Arguments

    + + + + + + + + + + +
    mu

    A double of the mean of the normal distribution.

    sigma

    A double of the standard deviation of the normal distribution.

    + +

    Value

    + +

    A list of the stochastic expression.

    + +

    See also

    + + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/package_model.html b/docs/reference/package_model.html new file mode 100644 index 00000000..ec7cfc7b --- /dev/null +++ b/docs/reference/package_model.html @@ -0,0 +1,203 @@ + + + + + + + + +Create a model package in the form of a Docker image or Dockerfile build +context — package_model • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Create a model package in the form of a Docker image or Dockerfile build +context

    + +
    + +
    package_model(workspace, models, inference_config,
    +  generate_dockerfile = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    workspace

    The workspace in which to create the package.

    models

    A list of Model objects to include in the package. Can be an +empty list.

    inference_config

    An InferenceConfig object to configure the +operation of the models. This must include an Environment object.

    generate_dockerfile

    Whether to create a Dockerfile that can be run +locally instead of building an image.

    + +

    Value

    + +

    A ModelPackage object.
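For example (a minimal sketch; model and inf_config are assumed to be an existing registered Model object and InferenceConfig object), followed by a wait for the package build to finish:
package <- package_model(ws,
+                         models = list(model),
+                         inference_config = inf_config)
+wait_for_model_package_creation(package)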

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/primary_metric_goal.html b/docs/reference/primary_metric_goal.html new file mode 100644 index 00000000..8fff2805 --- /dev/null +++ b/docs/reference/primary_metric_goal.html @@ -0,0 +1,192 @@ + + + + + + + + +Define supported metric goals for hyperparameter tuning — primary_metric_goal • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    A metric goal is used to determine whether a higher value for a metric +is better or worse. Metric goals are used when comparing runs based on +the primary metric. For example, you may want to maximize accuracy or +minimize error.

    +

    The primary metric name and goal are specified to hyperdrive_config() +when you configure a HyperDrive run.

    + +
    + +
    primary_metric_goal(goal)
    + +

    Arguments

    + + + + + + +
    goal

    A string of the metric goal (either "MAXIMIZE" or "MINIMIZE").

    + +

    Value

    + +

    The PrimaryMetricGoal object.
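For example, for a run that reports an accuracy metric to be maximized (a minimal sketch):
goal <- primary_metric_goal("MAXIMIZE")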

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/pull_model_package_image.html b/docs/reference/pull_model_package_image.html new file mode 100644 index 00000000..da695f88 --- /dev/null +++ b/docs/reference/pull_model_package_image.html @@ -0,0 +1,181 @@ + + + + + + + + +Pull the package output to the local machine. +This can only be used with a Docker image package. — pull_model_package_image • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Pull the package output to the local machine. +This can only be used with a Docker image package.

    + +
    + +
    pull_model_package_image(package)
    + +

    Arguments

    + + + + + + +
    package

    Package created with model(s) and dependencies.

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.4.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/qlognormal.html b/docs/reference/qlognormal.html new file mode 100644 index 00000000..dc675240 --- /dev/null +++ b/docs/reference/qlognormal.html @@ -0,0 +1,208 @@ + + + + + + + + +Specify a normal distribution of the form +<code>round(exp(normal(mu, sigma)) / q) * q</code> — qlognormal • azureml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    + +

    Specify a normal distribution of the form +round(exp(normal(mu, sigma)) / q) * q.

    +

    Suitable for a discrete variable with respect to which the objective is +smooth and gets smoother with the size of the variable, which is bounded +from one side.

    + +
    + +
    qlognormal(mu, sigma, q)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    mu

    A double of the mean of the normal distribution.

    sigma

    A double of the standard deviation of the normal distribution.

    q

    An integer of the smoothing factor.

    + +

    Value

    + +

    A list of the stochastic expression.

diff --git a/docs/reference/qloguniform.html b/docs/reference/qloguniform.html new file mode 100644 index 00000000..93db7640 --- /dev/null +++ b/docs/reference/qloguniform.html @@ -0,0 +1,208 @@
+Specify a uniform distribution of the form round(exp(uniform(min_value, max_value)) / q) * q — qloguniform • azureml

Specify a uniform distribution of the form +round(exp(uniform(min_value, max_value)) / q) * q.

    +

    This is suitable for a discrete variable with respect to which the objective +is "smooth", and gets smoother with the size of the value, but which should +be bounded both above and below.

    + +
    + +
    qloguniform(min_value, max_value, q)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    min_value

    A double of the minimum value in the range (inclusive).

    max_value

    A double of the maximum value in the range (inclusive).

    q

    An integer of the smoothing factor.

    + +

    Value

    + +

    A list of the stochastic expression.

diff --git a/docs/reference/qnormal.html b/docs/reference/qnormal.html new file mode 100644 index 00000000..98ca41ef --- /dev/null +++ b/docs/reference/qnormal.html @@ -0,0 +1,201 @@
+Specify a normal distribution of the form round(normal(mu, sigma) / q) * q — qnormal • azureml

    Specify a normal distribution of the form round(normal(mu, sigma) / q) * q.

    +

    Suitable for a discrete variable that probably takes a value around mu, +but is fundamentally unbounded.

    + +
    + +
    qnormal(mu, sigma, q)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    mu

    A double of the mean of the normal distribution.

    sigma

    A double of the standard deviation of the normal distribution.

    q

    An integer of the smoothing factor.

    + +

    Value

    + +

    A list of the stochastic expression.

diff --git a/docs/reference/quniform.html b/docs/reference/quniform.html new file mode 100644 index 00000000..23744373 --- /dev/null +++ b/docs/reference/quniform.html @@ -0,0 +1,206 @@
+Specify a uniform distribution of the form round(uniform(min_value, max_value) / q) * q — quniform • azureml

    Specify a uniform distribution of the form +round(uniform(min_value, max_value) / q) * q.

    +

    This is suitable for a discrete value with respect to which the objective +is still somewhat "smooth", but which should be bounded both above and below.

    + +
    + +
    quniform(min_value, max_value, q)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    min_value

    A double of the minimum value in the range (inclusive).

    max_value

    A double of the maximum value in the range (inclusive).

    q

    An integer of the smoothing factor.

    + +

    Value

    + +

    A list of the stochastic expression.
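
Examples

For example, to sample batch sizes between 16 and 256 in steps of 16 (the name batch_size is illustrative):

batch_size <- quniform(16, 256, 16L)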

diff --git a/docs/reference/r_environment.html b/docs/reference/r_environment.html new file mode 100644 index 00000000..5a56b6af --- /dev/null +++ b/docs/reference/r_environment.html @@ -0,0 +1,287 @@
+Create an environment — r_environment • azureml

    Configure the R environment to be used for training or web service +deployments. When you submit a run or deploy a model, Azure ML builds a +Docker image and creates a conda environment with your specifications from +your Environment object within that Docker container.

    +

If the custom_docker_image parameter +is not set, Azure ML automatically uses a default base image (CPU or GPU +depending on the use_gpu flag) and installs any R packages specified in the +cran_packages, github_packages, or custom_url_packages parameters.

    + +
    + +
    r_environment(name, version = NULL, environment_variables = NULL,
    +  cran_packages = NULL, github_packages = NULL,
    +  custom_url_packages = NULL, custom_docker_image = NULL,
    +  image_registry_details = NULL, use_gpu = FALSE, shm_size = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    name

    A string of the name of the environment.

    version

    A string of the version of the environment.

    environment_variables

    A named list of environment variables names +and values. These environment variables are set on the process where the user +script is being executed.

    cran_packages

    A character vector of CRAN packages to be installed.

    github_packages

    A character vector of GitHub packages to be installed.

    custom_url_packages

    A character vector of packages to be installed +from local directory or custom URL.

    custom_docker_image

    A string of the name of the Docker image from +which the image to use for training or deployment will be built. If not set, +a default CPU-based image will be used as the base image. To use an image +from a private Docker repository, you will also have to specify the +image_registry_details parameter.

    image_registry_details

    A ContainerRegistry object of the details of +the Docker image registry for the custom Docker image.

    use_gpu

    Indicates whether the environment should support GPUs. +If TRUE, a GPU-based default Docker image will be used in the environment. +If FALSE, a CPU-based image will be used. Default Docker images (CPU or +GPU) will only be used if the custom_docker_image parameter is not set.

    shm_size

    A string for the size of the Docker container's shared +memory block. For more information, see +Docker run reference +If not set, a default value of '2g' is used.

    + +

    Value

    + +

    The Environment object.

    + +

    Details

    + + + +

    Once built, the Docker image appears in the Azure Container Registry +associated with your workspace, by default. The repository name has the form +azureml/azureml_<uuid>. The unique identifier (uuid) part corresponds to +a hash computed from the environment configuration. This allows the service +to determine whether an image corresponding to the given environment already +exists for reuse.

    +

    If you make changes to an existing environment, such as adding an R package, +a new version of the environment is created when you either submit a run, +deploy a model, or manually register the environment. The versioning allows +you to view changes to the environment over time.

    + +

    Examples

    + + + +

    The following example defines an environment that will use the default +base CPU image and install the additional e1071 package from CRAN.

    r_env <- r_environment(name = 'myr_env',
    +                       version = '1',
    +                       cran_packages = c('e1071'))

diff --git a/docs/reference/randint.html b/docs/reference/randint.html new file mode 100644 index 00000000..a4fbc0ee --- /dev/null +++ b/docs/reference/randint.html @@ -0,0 +1,208 @@
+Specify a set of random integers in the range [0, upper) — randint • azureml

    Specify a set of random integers in the range [0, upper) +to sample the hyperparameters from.

    +

The semantics of this distribution is that there is no more +correlation in the loss function between nearby integer values, +as compared with more distant integer values. This is an +appropriate distribution for describing random seeds, for example. +If the loss function is likely more correlated for nearby integer +values, you should use one of the "quantized" continuous +distributions instead, such as quniform(), qloguniform(), qnormal(), +or qlognormal().

    + +
    + +
    randint(upper)
    + +

    Arguments

    + + + + + + +
    upper

    An integer of the upper bound for the range of +integers (exclusive).

    + +

    Value

    + +

    A list of the stochastic expression.
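
Examples

For example, to draw a random seed uniformly from the integers 0 through 99:

seed <- randint(100L)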

diff --git a/docs/reference/random_parameter_sampling.html b/docs/reference/random_parameter_sampling.html new file mode 100644 index 00000000..4831036e --- /dev/null +++ b/docs/reference/random_parameter_sampling.html @@ -0,0 +1,224 @@
+Define random sampling over a hyperparameter search space — random_parameter_sampling • azureml

    In random sampling, hyperparameter values are randomly selected from the +defined search space. Random sampling allows the search space to include +both discrete and continuous hyperparameters.

    + +
    + +
    random_parameter_sampling(parameter_space, properties = NULL)
    + +

    Arguments

    + + + + + + + + + + +
    parameter_space

    A named list containing each parameter and its +distribution, e.g. list("parameter" = distribution).

    properties

    A named list of additional properties for the algorithm.

    + +

    Value

    + +

    The RandomParameterSampling object.

    + +

    Details

    + + + +

    In this sampling algorithm, parameter values are chosen from a set of +discrete values or a distribution over a continuous range. Functions you can +use include: +choice(), randint(), uniform(), quniform(), loguniform(), +qloguniform(), normal(), qnormal(), lognormal(), and qlognormal().

    + +

    Examples

    + + +
param_sampling <- random_parameter_sampling(list(
+        'learning_rate' = normal(10, 3),
+        'keep_probability' = uniform(0.05, 0.1),
+        'batch_size' = choice(c(16, 32, 64, 128))))

diff --git a/docs/reference/register_azure_blob_container_datastore.html b/docs/reference/register_azure_blob_container_datastore.html new file mode 100644 index 00000000..6f3cbf13 --- /dev/null +++ b/docs/reference/register_azure_blob_container_datastore.html @@ -0,0 +1,285 @@
+Register an Azure blob container as a datastore — register_azure_blob_container_datastore • azureml

    Register an Azure blob container as a datastore. You can choose to use +either the SAS token or the storage account key.

    + +
    + +
    register_azure_blob_container_datastore(workspace, datastore_name,
    +  container_name, account_name, sas_token = NULL, account_key = NULL,
    +  protocol = NULL, endpoint = NULL, overwrite = FALSE,
    +  create_if_not_exists = FALSE, skip_validation = FALSE,
    +  blob_cache_timeout = NULL, grant_workspace_access = FALSE,
    +  subscription_id = NULL, resource_group = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

    The Workspace object.

    datastore_name

A string of the name of the datastore. The name +is case insensitive and can only contain alphanumeric characters and +underscores.

    container_name

    A string of the name of the Azure blob container.

    account_name

    A string of the storage account name.

    sas_token

    A string of the account SAS token.

    account_key

    A string of the storage account key.

    protocol

    A string of the protocol to use to connect to the +blob container. If NULL, defaults to 'https'.

    endpoint

    A string of the endpoint of the blob container. +If NULL, defaults to 'core.windows.net'.

    overwrite

    If TRUE, overwrites an existing datastore. If +the datastore does not exist, it will create one.

    create_if_not_exists

If TRUE, creates the blob container +if it does not exist.

    skip_validation

    If TRUE, skips validation of storage keys.

    blob_cache_timeout

    An integer of the cache timeout in seconds +when this blob is mounted. If NULL, defaults to no timeout (i.e. +blobs will be cached for the duration of the job when read).

    grant_workspace_access

If TRUE, grants workspace Managed Identities +(MSI) access to the user storage account. This should be set to TRUE if the +storage account is in a virtual network. If TRUE, Azure ML will use the workspace MSI +token to grant access to the user storage account. It may take a while for +the granted access to take effect.

    subscription_id

    A string of the subscription id of the storage +account.

    resource_group

    A string of the resource group of the storage account.

    + +

    Value

    + +

    The AzureBlobDatastore object.

    + +

    Details

    + + + +

    In general we recommend Azure Blob storage over Azure File storage. Both +standard and premium storage are available for blobs. Although more +expensive, we suggest premium storage due to faster throughput speeds that +may improve the speed of your training runs, particularly if you train +against a large dataset.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +ds <- register_azure_blob_container_datastore(
    +                               ws,
    +                               datastore_name = 'mydatastore',
    +                               container_name = 'myazureblobcontainername',
+                               account_name = 'mystorageaccountname',
    +                               account_key = 'mystorageaccountkey')

diff --git a/docs/reference/register_azure_file_share_datastore.html b/docs/reference/register_azure_file_share_datastore.html new file mode 100644 index 00000000..9b9cc936 --- /dev/null +++ b/docs/reference/register_azure_file_share_datastore.html @@ -0,0 +1,260 @@
+Register an Azure file share as a datastore — register_azure_file_share_datastore • azureml

    Register an Azure file share as a datastore. You can choose to use +either the SAS token or the storage account key.

    + +
    + +
    register_azure_file_share_datastore(workspace, datastore_name,
    +  file_share_name, account_name, sas_token = NULL, account_key = NULL,
    +  protocol = NULL, endpoint = NULL, overwrite = FALSE,
    +  create_if_not_exists = FALSE, skip_validation = FALSE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

    The Workspace object.

    datastore_name

A string of the name of the datastore. The name +is case insensitive and can only contain alphanumeric characters and +underscores.

    file_share_name

    A string of the name of the Azure file share.

    account_name

    A string of the storage account name.

    sas_token

    A string of the account SAS token.

    account_key

    A string of the storage account key.

    protocol

    A string of the protocol to use to connect to the +file store. If NULL, defaults to 'https'.

    endpoint

    A string of the endpoint of the file store. +If NULL, defaults to 'core.windows.net'.

    overwrite

    If TRUE, overwrites an existing datastore. If +the datastore does not exist, it will create one.

    create_if_not_exists

If TRUE, creates the file share +if it does not exist.

    skip_validation

    If TRUE, skips validation of storage keys.

    + +

    Value

    + +

    The AzureFileDatastore object.

    + +

    Details

    + + + +

    In general we recommend Azure Blob storage over Azure File storage. Both +standard and premium storage are available for blobs. Although more +expensive, we suggest premium storage due to faster throughput speeds that +may improve the speed of your training runs, particularly if you train +against a large dataset.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +ds <- register_azure_file_share_datastore(
    +                                   ws,
    +                                   datastore_name = 'mydatastore',
    +                                   file_share_name = 'myazurefilesharename',
+                                   account_name = 'mystorageaccountname',
    +                                   account_key = 'mystorageaccountkey')

diff --git a/docs/reference/register_environment.html b/docs/reference/register_environment.html new file mode 100644 index 00000000..f7e1ca95 --- /dev/null +++ b/docs/reference/register_environment.html @@ -0,0 +1,202 @@
+Register an environment in the workspace — register_environment • azureml

    The environment is automatically registered with your workspace when you +submit an experiment or deploy a web service. You can also manually register +the environment with register_environment(). This operation makes the +environment into an entity that is tracked and versioned in the cloud, and +can be shared between workspace users.

    +

When used for the first time in training or deployment, the environment is +registered with the workspace, built, and deployed on the compute target. +The environments are cached by the service. Reusing a cached environment +takes much less time than using a new environment or one that has been updated.

    + +
    + +
    register_environment(environment, workspace)
    + +

    Arguments

    + + + + + + + + + + +
    environment

    The Environment object.

    workspace

    The Workspace object.

    + +

    Value

    + +

    The Environment object.
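
Examples

A minimal sketch of manual registration, assuming a workspace config file is present:

ws <- load_workspace_from_config()
+env <- r_environment(name = 'myr_env', cran_packages = c('e1071'))
+env <- register_environment(env, ws)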

diff --git a/docs/reference/register_model.html b/docs/reference/register_model.html new file mode 100644 index 00000000..acd570d9 --- /dev/null +++ b/docs/reference/register_model.html @@ -0,0 +1,214 @@
+Register a model with the provided workspace. — register_model • azureml

    Register a model with the provided workspace.

    + +
    + +
    register_model(workspace, model_path, model_name, tags = NULL,
    +  properties = NULL, description = NULL, child_paths = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    workspace

    The workspace to register the model under

    model_path

    String which points to the path on the local file system +where the model assets are located. This can be a direct pointer to a single +file or folder. If pointing to a folder, the child_paths parameter can be +used to specify individual files to bundle together as the Model object, +as opposed to using the entire contents of the folder.

    model_name

    The name to register the model with

    tags

    Dictionary of key value tags to give the model

    properties

    Dictionary of key value properties to give the model. +These properties cannot be changed after model creation, however new key +value pairs can be added

    description

    A text description of the model

    child_paths

    If provided in conjunction with a model_path to a folder, +only the specified files will be bundled into the Model object.

    + +

    Value

    + +

    The registered Model object
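
Examples

The following sketch registers a model saved on the local file system; the paths and names are placeholders.

ws <- load_workspace_from_config()
+model <- register_model(ws,
+                        model_path = 'outputs/model.rds',
+                        model_name = 'mymodel',
+                        description = 'My trained model')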

diff --git a/docs/reference/reload_local_webservice_assets.html b/docs/reference/reload_local_webservice_assets.html new file mode 100644 index 00000000..c3a2cffb --- /dev/null +++ b/docs/reference/reload_local_webservice_assets.html @@ -0,0 +1,205 @@
+Reload the LocalWebservice's execution script and dependencies. This restarts the service's container with copies of updated assets, including the execution script and local dependencies, but it does not rebuild the underlying image. Accordingly, changes to Conda/pip dependencies or custom Docker steps will not be reflected in the reloaded LocalWebservice. To handle those changes, call update_local_webservice() instead. — reload_local_webservice_assets • azureml

Reload the LocalWebservice's execution script and dependencies. +This restarts the service's container with copies of updated assets, +including the execution script and local dependencies, but it does not +rebuild the underlying image. Accordingly, changes to Conda/pip dependencies +or custom Docker steps will not be reflected in the reloaded LocalWebservice. +To handle those changes, call update_local_webservice() instead.

    + +
    + +
    reload_local_webservice_assets(webservice, wait = FALSE)
    + +

    Arguments

    + + + + + + + + + + +
    webservice

    LocalWebservice object.

    wait

    Wait for the service's container to reach a healthy state.

diff --git a/docs/reference/save_model_package_files.html b/docs/reference/save_model_package_files.html new file mode 100644 index 00000000..5020a6f5 --- /dev/null +++ b/docs/reference/save_model_package_files.html @@ -0,0 +1,186 @@
+Save the package output to a local directory. This can only be used with a Dockerfile package. — save_model_package_files • azureml

    Save the package output to a local directory. +This can only be used with a Dockerfile package.

    + +
    + +
    save_model_package_files(package, output_directory)
    + +

    Arguments

    + + + + + + + + + + +
    package

    Package created with model(s) and dependencies.

    output_directory

    Local directory that will be created to contain +the contents of the package.
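
Examples

A minimal sketch, assuming package is a ModelPackage created with package_model() and generate_dockerfile = TRUE; the output directory name is a placeholder.

wait_for_model_package_creation(package)
+save_model_package_files(package, output_directory = 'imagefiles')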

diff --git a/docs/reference/serialize_model.html b/docs/reference/serialize_model.html new file mode 100644 index 00000000..a5405958 --- /dev/null +++ b/docs/reference/serialize_model.html @@ -0,0 +1,182 @@
+Convert this Model into a json serialized dictionary — serialize_model • azureml

    Convert this Model into a json serialized dictionary

    + +
    + +
    serialize_model(model)
    + +

    Arguments

    + + + + + + +
    model

The Model object to serialize.

    + +

    Value

    + +

    The json representation of this Model

diff --git a/docs/reference/serialize_webservice.html b/docs/reference/serialize_webservice.html new file mode 100644 index 00000000..e9c0abcc --- /dev/null +++ b/docs/reference/serialize_webservice.html @@ -0,0 +1,182 @@
+Convert this Webservice into a json serialized dictionary. — serialize_webservice • azureml

    Convert this Webservice into a json serialized dictionary.

    + +
    + +
    serialize_webservice(webservice)
    + +

    Arguments

    + + + + + + +
    webservice

    The webservice object.

    + +

    Value

    + +

    The json representation of this Webservice

diff --git a/docs/reference/set_default_datastore.html b/docs/reference/set_default_datastore.html new file mode 100644 index 00000000..891e15da --- /dev/null +++ b/docs/reference/set_default_datastore.html @@ -0,0 +1,180 @@
+Set the default datastore for a workspace — set_default_datastore • azureml

    Set the default datastore associated with the workspace.

    + +
    + +
    set_default_datastore(workspace, datastore_name)
    + +

    Arguments

    + + + + + + + + + + +
    workspace

    The Workspace object.

    datastore_name

    The name of the datastore to be set as default.
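
Examples

For example, to make a previously registered datastore the workspace default (the datastore name is a placeholder):

ws <- load_workspace_from_config()
+set_default_datastore(ws, datastore_name = 'mydatastore')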

diff --git a/docs/reference/set_secrets.html b/docs/reference/set_secrets.html new file mode 100644 index 00000000..b4efdbe9 --- /dev/null +++ b/docs/reference/set_secrets.html @@ -0,0 +1,195 @@
+Add secrets to a keyvault — set_secrets • azureml

    Add a named list of secrets into the keyvault associated with the +workspace.

    + +
    + +
    set_secrets(keyvault, secrets)
    + +

    Arguments

    + + + + + + + + + + +
    keyvault

    The Keyvault object.

    secrets

    The named list of secrets to be added to the keyvault, +where element name corresponds to the secret name.

    + +

    Examples

    + + +
    ws <- load_workspace_from_config()
    +my_secret <- Sys.getenv("MY_SECRET")
    +keyvault <- get_default_keyvault(ws)
+set_secrets(keyvault, secrets = list('mysecret' = my_secret))

diff --git a/docs/reference/submit_experiment.html b/docs/reference/submit_experiment.html new file mode 100644 index 00000000..34ed55f9 --- /dev/null +++ b/docs/reference/submit_experiment.html @@ -0,0 +1,233 @@
+Submit an experiment and return the active created run — submit_experiment • azureml

    submit_experiment() is an asynchronous call to Azure Machine Learning +service to execute a trial on local or remote compute. Depending on the +configuration, submit_experiment() will automatically prepare your +execution environments, execute your code, and capture your source code +and results in the experiment's run history.

    +

    To submit an experiment you first need to create a configuration object +describing how the experiment is to be run. The configuration depends on +the type of trial required. For a script run, provide an Estimator object +to the config parameter. For a HyperDrive run for hyperparameter tuning, +provide a HyperDriveConfig to config.

    + +
    + +
    submit_experiment(experiment, config, tags = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    experiment

    The Experiment object.

    config

    The Estimator or HyperDriveConfig object.

    tags

    A named list of tags for the submitted run, e.g. +list("tag" = "value").

    + +

    Value

    + +

    The ScriptRun or HyperDriveRun object.

    + +

    Examples

    + + + +

    The following example submits an Estimator experiment.

    ws <- load_workspace_from_config()
    +compute_target <- get_compute(ws, cluster_name = 'mycluster')
    +exp <- experiment(ws, name = 'myexperiment')
    +est <- estimator(source_directory = '.',
    +                 entry_script = 'train.R',
    +                 compute_target = compute_target)
    +run <- submit_experiment(exp, est)
    +
    + +

    For an example of submitting a HyperDrive experiment, see the +"Examples" section of hyperdrive_config().

diff --git a/docs/reference/truncation_selection_policy.html b/docs/reference/truncation_selection_policy.html new file mode 100644 index 00000000..05770627 --- /dev/null +++ b/docs/reference/truncation_selection_policy.html @@ -0,0 +1,242 @@
+Define a truncation selection policy for early termination of HyperDrive runs — truncation_selection_policy • azureml

Truncation selection cancels a given percentage of lowest performing runs at +each evaluation interval. Runs are compared based on their performance on the +primary metric and the lowest X% are terminated.

    + +
    + +
    truncation_selection_policy(truncation_percentage,
    +  evaluation_interval = 1L, delay_evaluation = 0L)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    truncation_percentage

    An integer of the percentage of lowest +performing runs to terminate at each interval.

    evaluation_interval

    An integer of the frequency for applying policy.

    delay_evaluation

    An integer of the number of intervals for which to +delay the first evaluation.

    + +

    Value

    + +

    The TruncationSelectionPolicy object.

    + +

    Details

    + + + +

    This policy periodically cancels the given percentage of runs that rank the +lowest for their performance on the primary metric. The policy strives for +fairness in ranking the runs by accounting for improving model performance +with training time. When ranking a relatively young run, the policy uses the +corresponding (and earlier) performance of older runs for comparison. +Therefore, runs aren't terminated for having a lower performance because they +have run for less time than other runs.

    +

    The truncation selection policy takes the following configuration parameters:

• truncation_percentage: An integer of the percentage of lowest performing +runs to terminate at each evaluation interval.

• evaluation_interval: Optional. The frequency for applying the policy. +Each time the training script logs the primary metric counts as one +interval.

• delay_evaluation: Optional. The number of intervals to delay the +policy evaluation. Use this parameter to avoid premature termination +of training runs. If specified, the policy applies every multiple of +evaluation_interval that is greater than or equal to delay_evaluation.

For example, when evaluating a run at interval N, its performance is only +compared with the performance of other runs up to interval N, even if they +reported metrics for intervals greater than N.

    + +

    Examples

    + + + +

In this example, the early termination policy is applied at every interval +starting at evaluation interval 5. A run will be terminated at interval 5 +if its performance at interval 5 is in the lowest 20% of performance of all +runs at interval 5.

early_termination_policy <- truncation_selection_policy(
    +                                                 truncation_percentage = 20L,
    +                                                 evaluation_interval = 1L,
    +                                                 delay_evaluation = 5L)

diff --git a/docs/reference/uniform.html b/docs/reference/uniform.html new file mode 100644 index 00000000..7ac7c253 --- /dev/null +++ b/docs/reference/uniform.html @@ -0,0 +1,197 @@
+Specify a uniform distribution of options to sample from — uniform • azureml

    Specify a uniform distribution of options to sample the +hyperparameters from.

    + +
    + +
    uniform(min_value, max_value)
    + +

    Arguments

    + + + + + + + + + + +
    min_value

    A double of the minimum value in the range +(inclusive).

    max_value

    A double of the maximum value in the range +(inclusive).

    + +

    Value

    + +

    A list of the stochastic expression.

diff --git a/docs/reference/unregister_datastore.html b/docs/reference/unregister_datastore.html new file mode 100644 index 00000000..261649ee --- /dev/null +++ b/docs/reference/unregister_datastore.html @@ -0,0 +1,178 @@
+Unregister a datastore from its associated workspace — unregister_datastore • azureml

    Unregister the datastore from its associated workspace. The +underlying Azure storage will not be deleted.

    + +
    + +
    unregister_datastore(datastore)
    + +

    Arguments

    + + + + + + +
    datastore

    The AzureBlobDatastore or AzureFileDatastore object.

diff --git a/docs/reference/update_aci_webservice.html b/docs/reference/update_aci_webservice.html new file mode 100644 index 00000000..e0b7c02c --- /dev/null +++ b/docs/reference/update_aci_webservice.html @@ -0,0 +1,233 @@
+Update the Webservice with provided properties. Values left as NULL will remain unchanged in this Webservice. — update_aci_webservice • azureml

Update the Webservice with provided properties. +Values left as NULL will remain unchanged in this Webservice.

    + +
    + +
    update_aci_webservice(webservice, tags = NULL, properties = NULL,
    +  description = NULL, auth_enabled = NULL, ssl_enabled = NULL,
    +  ssl_cert_pem_file = NULL, ssl_key_pem_file = NULL,
    +  ssl_cname = NULL, enable_app_insights = NULL, models = NULL,
    +  inference_config = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    webservice

    AciWebservice object.

    tags

    Dictionary of key value tags to give this Webservice. +Will replace existing tags.

    properties

    Dictionary of key value properties to add to existing +properties dictionary.

    description

    A description to give this Webservice.

    auth_enabled

    Enable or disable auth for this Webservice.

    ssl_enabled

    Whether or not to enable SSL for this Webservice.

    ssl_cert_pem_file

    The cert file needed if SSL is enabled.

    ssl_key_pem_file

    The key file needed if SSL is enabled.

    ssl_cname

    The cname for if SSL is enabled.

    enable_app_insights

    Whether or not to enable AppInsights for this +Webservice.

    models

    A list of Model objects to package into the updated service.

    inference_config

    An InferenceConfig object used to provide the +required model deployment properties.

diff --git a/docs/reference/update_aks_webservice.html b/docs/reference/update_aks_webservice.html new file mode 100644 index 00000000..9b2b1ff7 --- /dev/null +++ b/docs/reference/update_aks_webservice.html @@ -0,0 +1,321 @@
+Update the Webservice with provided properties. Values left as NULL will remain unchanged in this Webservice. — update_aks_webservice • azureml

Update the Webservice with provided properties. +Values left as NULL will remain unchanged in this Webservice.

    + +
    + +
    update_aks_webservice(webservice, autoscale_enabled = NULL,
    +  autoscale_min_replicas = NULL, autoscale_max_replicas = NULL,
    +  autoscale_refresh_seconds = NULL,
    +  autoscale_target_utilization = NULL, auth_enabled = NULL,
    +  cpu_cores = NULL, memory_gb = NULL, enable_app_insights = NULL,
    +  scoring_timeout_ms = NULL, replica_max_concurrent_requests = NULL,
    +  max_request_wait_time = NULL, num_replicas = NULL, tags = NULL,
    +  properties = NULL, description = NULL, models = NULL,
    +  inference_config = NULL, gpu_cores = NULL, period_seconds = NULL,
    +  initial_delay_seconds = NULL, timeout_seconds = NULL,
    +  success_threshold = NULL, failure_threshold = NULL,
    +  namespace = NULL, token_auth_enabled = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    webservice

AksWebservice object.

    autoscale_enabled

    Enable or disable autoscaling of this Webservice

    autoscale_min_replicas

    The minimum number of containers to use when +autoscaling this Webservice

    autoscale_max_replicas

    The maximum number of containers to use when +autoscaling this Webservice

    autoscale_refresh_seconds

    How often the autoscaler should attempt to +scale this Webservice

    autoscale_target_utilization

    The target utilization (in percent out +of 100) the autoscaler should attempt to maintain for this Webservice

    auth_enabled

    Whether or not to enable auth for this Webservice

    cpu_cores

    The number of cpu cores to allocate for this Webservice. +Can be a decimal

    memory_gb

    The amount of memory (in GB) to allocate for this +Webservice. Can be a decimal

    enable_app_insights

    Whether or not to enable Application Insights +logging for this Webservice

    scoring_timeout_ms

    A timeout to enforce for scoring calls to this +Webservice

    replica_max_concurrent_requests

    The number of maximum concurrent +requests per node to allow for this Webservice

    max_request_wait_time

    The maximum amount of time a request will stay +in the queue (in milliseconds) before returning a 503 error

    num_replicas

    The number of containers to allocate for this Webservice

    tags

    Dictionary of key value tags to give this Webservice. Will +replace existing tags.

    properties

    Dictionary of key value properties to add to existing +properties dictionary

    description

    A description to give this Webservice

    models

    A list of Model objects to package with the updated service

    inference_config

    An InferenceConfig object used to provide the +required model deployment properties.

    gpu_cores

    The number of gpu cores to allocate for this Webservice

    period_seconds

How often (in seconds) to perform the liveness probe. +Defaults to 10 seconds. Minimum value is 1.

    initial_delay_seconds

    Number of seconds after the container has +started before liveness probes are initiated.

    timeout_seconds

    Number of seconds after which the liveness probe +times out. Defaults to 1 second. Minimum value is 1.

    success_threshold

    Minimum consecutive successes for the liveness +probe to be considered successful after having failed. Defaults to 1. +Minimum value is 1.

    failure_threshold

    When a Pod starts and the liveness probe fails, +Kubernetes will try failureThreshold times before giving up. Defaults to 3. +Minimum value is 1.

    namespace

    The Kubernetes namespace in which to deploy this +Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen +('-') characters. The first and last characters cannot be hyphens.

    token_auth_enabled

    Whether or not to enable Token auth for this +Webservice. If this is enabled, users can access this Webservice by fetching +access token using their Azure Active Directory credentials. +Defaults to FALSE

diff --git a/docs/reference/update_aml_compute.html b/docs/reference/update_aml_compute.html new file mode 100644 index 00000000..402dbb91 --- /dev/null +++ b/docs/reference/update_aml_compute.html @@ -0,0 +1,192 @@
+Update scale settings for an AmlCompute cluster — update_aml_compute • azureml

    Update the scale settings for an existing AmlCompute cluster.

    + +
    + +
    update_aml_compute(cluster, min_nodes = NULL, max_nodes = NULL,
    +  idle_seconds_before_scaledown = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    cluster

    The AmlCompute cluster.

    min_nodes

    An integer of the minimum number of nodes to use on +the cluster.

    max_nodes

    An integer of the maximum number of nodes to use on +the cluster.

    idle_seconds_before_scaledown

    An integer of the node idle time +in seconds before scaling down the cluster.
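
Examples

The following sketch lets an existing cluster scale down to zero nodes after ten idle minutes; the cluster name is a placeholder.

ws <- load_workspace_from_config()
+cluster <- get_compute(ws, cluster_name = 'mycluster')
+update_aml_compute(cluster, min_nodes = 0L, max_nodes = 4L,
+                   idle_seconds_before_scaledown = 600L)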

diff --git a/docs/reference/update_local_webservice.html b/docs/reference/update_local_webservice.html new file mode 100644 index 00000000..863f0a1f --- /dev/null +++ b/docs/reference/update_local_webservice.html @@ -0,0 +1,200 @@
+Update the LocalWebservice with provided properties. Values left as NULL will remain unchanged in this LocalWebservice. — update_local_webservice • azureml

Update the LocalWebservice with provided properties. +Values left as NULL will remain unchanged in this LocalWebservice.

    + +
    + +
    update_local_webservice(webservice, models = NULL,
    +  deployment_config = NULL, wait = FALSE, inference_config = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    webservice

    LocalWebservice object.

    models

    A new list of models contained in the LocalWebservice.

    deployment_config

    Deployment configuration options to apply to the +LocalWebservice.

    wait

    Wait for the service's container to reach a healthy state.

    inference_config

    An InferenceConfig object used to provide the +required model deployment properties.

diff --git a/docs/reference/upload_files_to_datastore.html b/docs/reference/upload_files_to_datastore.html new file mode 100644 index 00000000..893254f8 --- /dev/null +++ b/docs/reference/upload_files_to_datastore.html @@ -0,0 +1,214 @@
+Upload files to the Azure storage a datastore points to — upload_files_to_datastore • azureml

    Upload the data from the local file system to the Azure storage that the +datastore points to.

    + +
    + +
    upload_files_to_datastore(datastore, files, relative_root = NULL,
    +  target_path = NULL, overwrite = FALSE, show_progress = TRUE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    datastore

    The AzureBlobDatastore or AzureFileDatastore object.

    files

    A list of strings of the absolute path to files to upload.

    relative_root

    A string of the base path from which is used to +determine the path of the files in the Azure storage. For example, if +we upload /path/to/file.txt, and we define the base path to be /path, +when file.txt is uploaded to the blob storage or file share, it will +have the path of /to/file.txt. If target_path is also given, then it +will be used as the prefix for the derived path from above. The base path +must be a common path of all of the files, otherwise an exception will be +thrown.

    target_path

    A string of the location in the blob container or file +share to upload the data to. Defaults to NULL, in which case the data is +uploaded to the root.

    overwrite

    If TRUE, overwrites any existing data at target_path.

    show_progress

    If TRUE, show progress of upload in the console.

    + +

    Value

    + +

    The DataReference object for the target path uploaded.
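
Examples

A minimal sketch, assuming ds is a datastore object such as one returned by register_azure_blob_container_datastore(); the file path is a placeholder.

ref <- upload_files_to_datastore(ds,
+                                 files = list('./data/train.csv'),
+                                 target_path = 'train-data')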

diff --git a/docs/reference/upload_to_datastore.html b/docs/reference/upload_to_datastore.html new file mode 100644 index 00000000..6126a334 --- /dev/null +++ b/docs/reference/upload_to_datastore.html @@ -0,0 +1,201 @@
+Upload a local directory to the Azure storage a datastore points to — upload_to_datastore • azureml

    Upload a local directory to the Azure storage the datastore points to.

    + +
    + +
    upload_to_datastore(datastore, src_dir, target_path = NULL,
    +  overwrite = FALSE, show_progress = TRUE)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    datastore

    The AzureBlobDatastore or AzureFileDatastore object.

    src_dir

    A string of the local directory to upload.

    target_path

    A string of the location in the blob container or +file share to upload the data to. Defaults to NULL, in which case the data +is uploaded to the root.

    overwrite

    If TRUE, overwrites any existing data at target_path.

    show_progress

    If TRUE, show progress of upload in the console.

    + +

    Value

    + +

    The DataReference object for the target path uploaded.
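
Examples

A minimal sketch, assuming ds is a previously registered datastore object; the paths are placeholders.

ref <- upload_to_datastore(ds, src_dir = './data', target_path = 'train-data')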

diff --git a/docs/reference/view_run_details.html b/docs/reference/view_run_details.html new file mode 100644 index 00000000..897ecc0a --- /dev/null +++ b/docs/reference/view_run_details.html @@ -0,0 +1,176 @@
+Plot table of run details in Viewer — view_run_details • azureml

    Plot table of run details in Viewer

    + +
    + +
    view_run_details(run)
    + +

    Arguments

    + + + + + + +
    run

The Run object whose details will be plotted.

diff --git a/docs/reference/wait_for_deployment.html b/docs/reference/wait_for_deployment.html new file mode 100644 index 00000000..1c4b5283 --- /dev/null +++ b/docs/reference/wait_for_deployment.html @@ -0,0 +1,180 @@
+Automatically poll on the running Webservice deployment. — wait_for_deployment • azureml

    Automatically poll on the running Webservice deployment.

    + +
    + +
    wait_for_deployment(webservice, show_output = FALSE)
    + +

    Arguments

    + + + + + + + + + + +
    webservice

    The webservice object.

    show_output

    Option to print more verbose output.

diff --git a/docs/reference/wait_for_model_package_creation.html b/docs/reference/wait_for_model_package_creation.html new file mode 100644 index 00000000..4c2cbef5 --- /dev/null +++ b/docs/reference/wait_for_model_package_creation.html @@ -0,0 +1,181 @@
+Wait for the package to finish creating. — wait_for_model_package_creation • azureml

    Wait for the package to finish creating.

    + +
    + +
    wait_for_model_package_creation(package, show_output = FALSE)
    + +

    Arguments

    + + + + + + + + + + +
    package

    Package created with model(s) and dependencies.

    show_output

    Boolean option to print more verbose output. Defaults to +FALSE.

diff --git a/docs/reference/wait_for_provisioning_completion.html b/docs/reference/wait_for_provisioning_completion.html new file mode 100644 index 00000000..361efd72 --- /dev/null +++ b/docs/reference/wait_for_provisioning_completion.html @@ -0,0 +1,203 @@
+Wait for a cluster to finish provisioning — wait_for_provisioning_completion • azureml

    Wait for a cluster to finish provisioning. Typically invoked after a +create_aml_compute() or create_aks_compute() call.

    + +
    + +
    wait_for_provisioning_completion(cluster, show_output = FALSE)
    + +

    Arguments

    + + + + + + + + + + +
    cluster

    The AmlCompute or AksCompute object.

    show_output

    If TRUE, more verbose output will be provided.

    + +

    Examples

    + + + +

    Wait for an AmlCompute cluster to finish provisioning.

    ws <- load_workspace_from_config()
    +compute_target <- create_aml_compute(ws,
    +                                     cluster_name = 'mycluster',
    +                                     vm_size = 'STANDARD_D2_V2',
    +                                     max_nodes = 1)
    +wait_for_provisioning_completion(compute_target)

diff --git a/docs/reference/wait_for_run_completion.html b/docs/reference/wait_for_run_completion.html new file mode 100644 index 00000000..465526a9 --- /dev/null +++ b/docs/reference/wait_for_run_completion.html @@ -0,0 +1,180 @@
+Wait for the completion of this run — wait_for_run_completion • azureml

    Wait for the completion of this run

    + +
    + +
    wait_for_run_completion(run, show_output = TRUE)
    + +

    Arguments

    + + + + + + + + + + +
    run

The Run object.

    show_output

If TRUE, print verbose output to the console.
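
Examples

For example, to block until a submitted run finishes while streaming its output (exp and est as in the submit_experiment() example):

run <- submit_experiment(exp, est)
+wait_for_run_completion(run, show_output = TRUE)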

diff --git a/docs/reference/write_workspace_config.html b/docs/reference/write_workspace_config.html new file mode 100644 index 00000000..4b9bbfcc --- /dev/null +++ b/docs/reference/write_workspace_config.html @@ -0,0 +1,198 @@
+Write out the workspace configuration details to a config file — write_workspace_config • azureml

Write out the workspace ARM properties to a config file. The saved +properties can be loaded later using load_workspace_from_config(), which +provides a simple way of reusing the same workspace across multiple files +or projects without retyping the workspace ARM properties.

    + +
    + +
    write_workspace_config(workspace, path = NULL, file_name = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    workspace

    The Workspace object whose config has to be written down.

    path

    A string of the location to write the config.json file. +The parameter defaults to the current working directory.

    file_name

    A string of the name to use for the config file. The +parameter defaults to 'config.json'.
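
Examples

A minimal sketch, assuming ws is a Workspace object (for example, one returned by create_workspace() or load_workspace_from_config()):

write_workspace_config(ws, path = '.')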

    + + + + + + + + diff --git a/man/aci_webservice_deployment_config.Rd b/man/aci_webservice_deployment_config.Rd new file mode 100644 index 00000000..a11b2435 --- /dev/null +++ b/man/aci_webservice_deployment_config.Rd @@ -0,0 +1,58 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-aci.R +\name{aci_webservice_deployment_config} +\alias{aci_webservice_deployment_config} +\title{Create a configuration object for deploying an ACI Webservice.} +\usage{ +aci_webservice_deployment_config(cpu_cores = NULL, memory_gb = NULL, + tags = NULL, properties = NULL, description = NULL, + location = NULL, auth_enabled = NULL, ssl_enabled = NULL, + enable_app_insights = NULL, ssl_cert_pem_file = NULL, + ssl_key_pem_file = NULL, ssl_cname = NULL, dns_name_label = NULL) +} +\arguments{ +\item{cpu_cores}{The number of cpu cores to allocate for this Webservice. +Can be a decimal.} + +\item{memory_gb}{The amount of memory (in GB) to allocate for this +Webservice. Can be a decimal. Defaults to 0.5} + +\item{tags}{Dictionary of key value tags to give this Webservice} + +\item{properties}{Dictionary of key value properties to give this +Webservice. These properties cannot be changed after deployment, however +new key value pairs can be added} + +\item{description}{A description to give this Webservice} + +\item{location}{The Azure region to deploy this Webservice to. If not +specified the Workspace location will be used. More details on available +regions can be found here: +https://azure.microsoft.com/en-us/global-infrastructure/services/?regions=all&products=container-instances} + +\item{auth_enabled}{Whether or not to enable auth for this Webservice. +Defaults to FALSE} + +\item{ssl_enabled}{Whether or not to enable SSL for this Webservice. +Defaults to FALSE} + +\item{enable_app_insights}{Whether or not to enable AppInsights for this +Webservice. Defaults to FALSE} + +\item{ssl_cert_pem_file}{The cert file needed if SSL is enabled} + +\item{ssl_key_pem_file}{The key file needed if SSL is enabled} + +\item{ssl_cname}{The cname for if SSL is enabled} + +\item{dns_name_label}{The dns name label for the scoring endpoint. +If not specified a unique dns name label will be generated for the scoring +endpoint.} +} +\value{ +AciServiceDeploymentConfiguration object to use when deploying a +Webservice object +} +\description{ +Create a configuration object for deploying an ACI Webservice. 
+} diff --git a/man/aks_webservice_deployment_config.Rd b/man/aks_webservice_deployment_config.Rd new file mode 100644 index 00000000..955f4ee6 --- /dev/null +++ b/man/aks_webservice_deployment_config.Rd @@ -0,0 +1,109 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-aks.R +\name{aks_webservice_deployment_config} +\alias{aks_webservice_deployment_config} +\title{Create a configuration object for deploying to an AKS compute target.} +\usage{ +aks_webservice_deployment_config(autoscale_enabled = NULL, + autoscale_min_replicas = NULL, autoscale_max_replicas = NULL, + autoscale_refresh_seconds = NULL, + autoscale_target_utilization = NULL, auth_enabled = NULL, + cpu_cores = NULL, memory_gb = NULL, enable_app_insights = NULL, + scoring_timeout_ms = NULL, replica_max_concurrent_requests = NULL, + max_request_wait_time = NULL, num_replicas = NULL, + primary_key = NULL, secondary_key = NULL, tags = NULL, + properties = NULL, description = NULL, gpu_cores = NULL, + period_seconds = NULL, initial_delay_seconds = NULL, + timeout_seconds = NULL, success_threshold = NULL, + failure_threshold = NULL, namespace = NULL, + token_auth_enabled = NULL) +} +\arguments{ +\item{autoscale_enabled}{Whether or not to enable autoscaling for this +Webservice. Defaults to TRUE if num_replicas is NULL.} + +\item{autoscale_min_replicas}{The minimum number of containers to use when +autoscaling this Webservice. Defaults to 1.} + +\item{autoscale_max_replicas}{The maximum number of containers to use when +autoscaling this Webservice. Defaults to 10.} + +\item{autoscale_refresh_seconds}{How often the autoscaler should attempt to +scale this Webservice. Defaults to 1.} + +\item{autoscale_target_utilization}{The target utilization (in percent out +of 100) the autoscaler should attempt to maintain for this Webservice. +Defaults to 70.} + +\item{auth_enabled}{Whether or not to enable key auth for this Webservice. +Defaults to TRUE.} + +\item{cpu_cores}{The number of CPU cores to allocate for this Webservice. +Can be a decimal. Defaults to 0.1.} + +\item{memory_gb}{The amount of memory (in GB) to allocate for this +Webservice. Can be a decimal. Defaults to 0.5.} + +\item{enable_app_insights}{Whether or not to enable Application Insights +logging for this Webservice. Defaults to FALSE.} + +\item{scoring_timeout_ms}{A timeout to enforce for scoring calls to this +Webservice. Defaults to 60000.} + +\item{replica_max_concurrent_requests}{The maximum number of concurrent +requests per node to allow for this Webservice. Defaults to 1.} + +\item{max_request_wait_time}{The maximum amount of time a request will stay +in the queue (in milliseconds) before returning a 503 error. Defaults to 500.} + +\item{num_replicas}{The number of containers to allocate for this +Webservice. No default; if this parameter is not set, the autoscaler is +enabled by default.} + +\item{primary_key}{A primary auth key to use for this Webservice.} + +\item{secondary_key}{A secondary auth key to use for this Webservice.} + +\item{tags}{Dictionary of key-value tags to give this Webservice.} + +\item{properties}{Dictionary of key-value properties to give this +Webservice. These properties cannot be changed after deployment; however, new +key-value pairs can be added.} + +\item{description}{A description to give this Webservice.} + +\item{gpu_cores}{The number of GPU cores to allocate for this Webservice. +Defaults to 1.} + +\item{period_seconds}{How often (in seconds) to perform the liveness probe. +Defaults to 10 seconds.
Minimum value is 1.} + +\item{initial_delay_seconds}{Number of seconds after the container has +started before liveness probes are initiated. Defaults to 310.} + +\item{timeout_seconds}{Number of seconds after which the liveness probe +times out. Defaults to 2 seconds. Minimum value is 1.} + +\item{success_threshold}{Minimum consecutive successes for the liveness +probe to be considered successful after having failed. Defaults to 1. +Minimum value is 1.} + +\item{failure_threshold}{When a Pod starts and the liveness probe fails, +Kubernetes will try failureThreshold times before giving up. Defaults to 3. +Minimum value is 1.} + +\item{namespace}{The Kubernetes namespace in which to deploy this +Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen +('-') characters. The first and last characters cannot be hyphens.} + +\item{token_auth_enabled}{Whether or not to enable token auth for this +Webservice. If this is enabled, users can access this Webservice by fetching +an access token using their Azure Active Directory credentials. +Defaults to FALSE.} +} +\value{ +AksServiceDeploymentConfiguration object. +} +\description{ +Create a configuration object for deploying to an AKS compute target. +} diff --git a/man/attach_aks_compute.Rd b/man/attach_aks_compute.Rd new file mode 100644 index 00000000..bb554bb1 --- /dev/null +++ b/man/attach_aks_compute.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{attach_aks_compute} +\alias{attach_aks_compute} +\title{Attach an existing AKS cluster to a workspace} +\usage{ +attach_aks_compute(workspace, cluster_name, resource_id = NULL, + resource_group = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object to attach the AKS cluster to.} + +\item{cluster_name}{A string of the name for the cluster.} + +\item{resource_id}{A string of the resource ID for the AKS cluster being +attached.} + +\item{resource_group}{A string of the resource group in which the AKS cluster +is located.} +} +\value{ +The \code{AksCompute} object. +} +\description{ +If you already have an AKS cluster in your Azure subscription, and it is +version 1.12.##, you can attach it to your workspace to use for deployments. +The existing AKS cluster can be in a different Azure region than your +workspace. + +If you want to secure your AKS cluster using an Azure Virtual Network, you +must create the virtual network first.
For more information, see +\href{https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#aksvnet}{Secure Azure ML experimentation and inference jobs within an Azure Virtual Network}. +} +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +compute_target <- attach_aks_compute(ws, + cluster_name = 'mycluster', + resource_id = 'myresourceid', + resource_group = 'myresourcegroup') +} +} + diff --git a/man/azureml.Rd b/man/azureml.Rd new file mode 100644 index 00000000..31f61abb --- /dev/null +++ b/man/azureml.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/modules.R +\docType{data} +\name{azureml} +\alias{azureml} +\title{azureml module +Users can access functions/modules in azureml that are not exposed through the +exported R functions.} +\format{An object of class \code{python.builtin.module} (inherits from \code{python.builtin.object}) of length 5.} +\usage{ +azureml +} +\description{ +azureml module +Users can access functions/modules in azureml that are not exposed through the +exported R functions. +} +\keyword{datasets} diff --git a/man/bandit_policy.Rd b/man/bandit_policy.Rd new file mode 100644 index 00000000..dfb3a4c5 --- /dev/null +++ b/man/bandit_policy.Rd @@ -0,0 +1,78 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{bandit_policy} +\alias{bandit_policy} +\title{Define a Bandit policy for early termination of HyperDrive runs} +\usage{ +bandit_policy(slack_factor = NULL, slack_amount = NULL, + evaluation_interval = 1L, delay_evaluation = 0L) +} +\arguments{ +\item{slack_factor}{A double of the ratio of the allowed distance from +the best performing run.} + +\item{slack_amount}{A double of the absolute distance allowed from the +best performing run.} + +\item{evaluation_interval}{An integer of the frequency for applying the policy.} + +\item{delay_evaluation}{An integer of the number of intervals for which to +delay the first evaluation.} +} +\value{ +The \code{BanditPolicy} object. +} +\description{ +Bandit is an early termination policy based on slack factor/slack amount +and evaluation interval. The policy early terminates any runs where the +primary metric is not within the specified slack factor/slack amount with +respect to the best performing training run. +} +\section{Details}{ + +The Bandit policy takes the following configuration parameters: +\itemize{ +\item \code{slack_factor} or \code{slack_amount}: The slack allowed with respect to +the best performing training run. \code{slack_factor} specifies the +allowable slack as a ratio. \code{slack_amount} specifies the allowable +slack as an absolute amount, instead of a ratio. +\item \code{evaluation_interval}: Optional. The frequency for applying the policy. +Each time the training script logs the primary metric counts as one +interval. +\item \code{delay_evaluation}: Optional. The number of intervals to delay the +policy evaluation. Use this parameter to avoid premature termination +of training runs. If specified, the policy applies every multiple of +\code{evaluation_interval} that is greater than or equal to \code{delay_evaluation}. +} + +Any run that doesn't fall within the slack factor or slack amount of the +evaluation metric with respect to the best performing run will be +terminated. + +Consider a Bandit policy with \code{slack_factor = 0.2} and +\code{evaluation_interval = 100}.
Assume that run X is the currently best +performing run with an AUC (performance metric) of 0.8 after 100 intervals. +Further, assume the best AUC reported for a run is Y. This policy compares +the value \code{(Y + Y * 0.2)} to 0.8, and if smaller, cancels the run. +If \code{delay_evaluation = 200}, then the first time the policy will be applied +is at interval 200. + +Now, consider a Bandit policy with \code{slack_amount = 0.2} and +\code{evaluation_interval = 100}. If run 3 is the currently best performing run +with an AUC (performance metric) of 0.8 after 100 intervals, then any run +with an AUC less than 0.6 (\code{0.8 - 0.2}) after 100 iterations will be +terminated. Similarly, the \code{delay_evaluation} can also be used to delay the +first termination policy evaluation for a specific number of sequences. +} + +\section{Examples}{ + +In this example, the early termination policy is applied at every interval +when metrics are reported, starting at evaluation interval 5. Any run whose +best metric is less than +\code{1 / (1 + 0.1)} or 91\% of the best performing run will be terminated.\preformatted{early_termination_policy = bandit_policy(slack_factor = 0.1, + evaluation_interval = 1L, + delay_evaluation = 5L) +} +} + diff --git a/man/bayesian_parameter_sampling.Rd b/man/bayesian_parameter_sampling.Rd new file mode 100644 index 00000000..d7c2b2d5 --- /dev/null +++ b/man/bayesian_parameter_sampling.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{bayesian_parameter_sampling} +\alias{bayesian_parameter_sampling} +\title{Define Bayesian sampling over a hyperparameter search space} +\usage{ +bayesian_parameter_sampling(parameter_space) +} +\arguments{ +\item{parameter_space}{A named list containing each parameter and its +distribution, e.g. \code{list("parameter" = distribution)}.} +} +\value{ +The \code{BayesianParameterSampling} object. +} +\description{ +Bayesian sampling is based on the Bayesian optimization algorithm and makes +intelligent choices on the hyperparameter values to sample next. It picks +the sample based on how the previous samples performed, such that the new +sample improves the reported primary metric. +} +\section{Details}{ + +When you use Bayesian sampling, the number of concurrent runs has an impact +on the effectiveness of the tuning process. Typically, a smaller number of +concurrent runs can lead to better sampling convergence, since the smaller +degree of parallelism increases the number of runs that benefit from +previously completed runs. + +Bayesian sampling only supports \code{choice()}, \code{uniform()}, and \code{quniform()} +distributions over the search space. + +Bayesian sampling does not support any early termination policy. When +using Bayesian parameter sampling, \code{early_termination_policy} must be +\code{NULL}. 
+} + +\section{Examples}{ +\preformatted{param_sampling <- bayesian_parameter_sampling(list(
+    "learning_rate" = uniform(0.05, 0.1),
+    "batch_size" = choice(16, 32, 64, 128)
+))
+}
+} + +\seealso{ +\code{choice()}, \code{uniform()}, \code{quniform()} +} diff --git a/man/cancel_run.Rd b/man/cancel_run.Rd new file mode 100644 index 00000000..ea779493 --- /dev/null +++ b/man/cancel_run.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{cancel_run} +\alias{cancel_run} +\title{Cancel run} +\usage{ +cancel_run(run) +} +\arguments{ +\item{run}{The run to be cancelled.} +} +\value{ +TRUE if cancellation was successful, else FALSE. +} +\description{ +Cancel run. +} diff --git a/man/choice.Rd b/man/choice.Rd new file mode 100644 index 00000000..13982b55 --- /dev/null +++ b/man/choice.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{choice} +\alias{choice} +\title{Specify a discrete set of options to sample from} +\usage{ +choice(options) +} +\arguments{ +\item{options}{A list of discrete values to choose from, or +one or more comma-separated discrete values to choose from.} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a discrete set of options to sample the hyperparameters +from. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/container_registry.Rd b/man/container_registry.Rd new file mode 100644 index 00000000..3626d1eb --- /dev/null +++ b/man/container_registry.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environment.R +\name{container_registry} +\alias{container_registry} +\title{Specify Azure Container Registry details} +\usage{ +container_registry(address = NULL, username = NULL, password = NULL) +} +\arguments{ +\item{address}{A string of the DNS name or IP address of the +Azure Container Registry (ACR).} + +\item{username}{A string of the username for ACR.} + +\item{password}{A string of the password for ACR.} +} +\value{ +The \code{ContainerRegistry} object. +} +\description{ +Returns a \code{ContainerRegistry} object with the details for an +Azure Container Registry (ACR). This is needed when a custom +Docker image used for training or deployment is located in +a private image registry. Provide a \code{ContainerRegistry} object +to the \code{image_registry_details} parameter of either \code{r_environment()} +or \code{estimator()}. +} +\seealso{ +\code{r_environment()}, \code{estimator()} +} diff --git a/man/create_aks_compute.Rd b/man/create_aks_compute.Rd new file mode 100644 index 00000000..5645175d --- /dev/null +++ b/man/create_aks_compute.Rd @@ -0,0 +1,85 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{create_aks_compute} +\alias{create_aks_compute} +\title{Create an AksCompute cluster} +\usage{ +create_aks_compute(workspace, cluster_name, agent_count = NULL, + vm_size = NULL, ssl_cname = NULL, ssl_cert_pem_file = NULL, + ssl_key_pem_file = NULL, location = NULL, + vnet_resourcegroup_name = NULL, vnet_name = NULL, + subnet_name = NULL, service_cidr = NULL, dns_service_ip = NULL, + docker_bridge_cidr = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{cluster_name}{A string of the name of the cluster.} + +\item{agent_count}{An integer of the number of agents (VMs) to host +containers.
Defaults to \code{3}.} + +\item{vm_size}{A string of the size of agent VMs. More details can be found +\href{https://aka.ms/azureml-vm-details}{here}. +Note that not all sizes are available in all regions, as detailed in the +aforementioned link. Defaults to \code{'Standard_D3_v2'}.} + +\item{ssl_cname}{A string of a CName to use if enabling SSL validation on +the cluster. Must provide all three - CName, cert file, and key file - to +enable SSL validation.} + +\item{ssl_cert_pem_file}{A string of a file path to a file containing cert +information for SSL validation. Must provide all three - CName, cert file, +and key file - to enable SSL validation.} + +\item{ssl_key_pem_file}{A string of a file path to a file containing key +information for SSL validation. Must provide all three - CName, cert file, +and key file - to enable SSL validation.} + +\item{location}{A string of the location to provision the cluster in. If not +specified, defaults to the workspace location. Available regions for this +compute can be found here: +"https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=kubernetes-service".} + +\item{vnet_resourcegroup_name}{A string of the name of the resource group +where the virtual network is located.} + +\item{vnet_name}{A string of the name of the virtual network.} + +\item{subnet_name}{A string of the name of the subnet inside the vnet.} + +\item{service_cidr}{A string of a CIDR notation IP range from which to assign +service cluster IPs.} + +\item{dns_service_ip}{A string of the container's DNS server IP address.} + +\item{docker_bridge_cidr}{A string of a CIDR notation IP for the Docker bridge.} +} +\value{ +An \code{AksCompute} object. +} +\description{ +Provision an Azure Kubernetes Service instance (AksCompute) as a compute +target for web service deployment. AksCompute is recommended for high-scale +production deployments and provides fast response time and autoscaling of +the deployed service. Cluster autoscaling isn't supported through the Azure +ML R SDK. To change the nodes in the AksCompute cluster, use the UI for the +cluster in the Azure portal. Once created, the cluster can be reused for +multiple deployments.
+} +\section{Details}{ + +For more information on using an AksCompute resource within a virtual +network, see +\href{https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#use-azure-kubernetes-service-aks}{Secure Azure ML experimentation and inference jobs within an Azure Virtual Network}. +} + +\section{Examples}{ + +Create an AksCompute cluster using the default configuration (you can also +provide parameters to customize this).\preformatted{ws <- load_workspace_from_config() +compute_target <- create_aks_compute(ws, cluster_name = 'mycluster') +wait_for_provisioning_completion(compute_target) +} +} + diff --git a/man/create_aml_compute.Rd b/man/create_aml_compute.Rd new file mode 100644 index 00000000..cf53161a --- /dev/null +++ b/man/create_aml_compute.Rd @@ -0,0 +1,93 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{create_aml_compute} +\alias{create_aml_compute} +\title{Create an AmlCompute cluster} +\usage{ +create_aml_compute(workspace, cluster_name, vm_size, + vm_priority = "dedicated", min_nodes = 0, max_nodes = NULL, + idle_seconds_before_scaledown = NULL, admin_username = NULL, + admin_user_password = NULL, admin_user_ssh_key = NULL, + vnet_resourcegroup_name = NULL, vnet_name = NULL, + subnet_name = NULL, tags = NULL, description = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{cluster_name}{A string of the name of the cluster.} + +\item{vm_size}{A string of the size of agent VMs. More details can be found +\href{https://aka.ms/azureml-vm-details}{here}. +Note that not all sizes are available in all regions, as detailed in the +aforementioned link. Defaults to \code{'Standard_NC6'}.} + +\item{vm_priority}{A string of either \code{'dedicated'} or \code{'lowpriority'} to +use either dedicated or low-priority VMs. Defaults to \code{'dedicated'}.} + +\item{min_nodes}{An integer of the minimum number of nodes to use on the +cluster. If not specified, will default to \code{0}.} + +\item{max_nodes}{An integer of the maximum number of nodes to use on the +cluster.} + +\item{idle_seconds_before_scaledown}{An integer of the node idle time in +seconds before scaling down the cluster. Defaults to \code{120}.} + +\item{admin_username}{A string of the name of the administrator user account +that can be used to SSH into nodes.} + +\item{admin_user_password}{A string of the password of the administrator user +account.} + +\item{admin_user_ssh_key}{A string of the SSH public key of the administrator +user account.} + +\item{vnet_resourcegroup_name}{A string of the name of the resource group +where the virtual network is located.} + +\item{vnet_name}{A string of the name of the virtual network.} + +\item{subnet_name}{A string of the name of the subnet inside the vnet.} + +\item{tags}{A named list of tags for the cluster, e.g. +\code{list("tag" = "value")}.} + +\item{description}{A string of the description for the cluster.} +} +\value{ +The \code{AmlCompute} object. +} +\description{ +Provision Azure Machine Learning Compute (AmlCompute) as a compute target +for training. AmlCompute is a managed-compute infrastructure that allows the +user to easily create a single or multi-node compute. To create a persistent +AmlCompute resource that can be reused across jobs, make sure to specify the +\code{vm_size} and \code{max_nodes} parameters. The compute can then be shared with +other users in the workspace and is kept between jobs.
If \code{min_nodes = 0}, +the compute autoscales down to zero nodes when it isn't used, and scales up +automatically when a job is submitted. + +AmlCompute has default limits, such as the number of cores that can be +allocated. For more information, see +\href{https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas}{Manage and request quotas for Azure resources}. +} +\section{Details}{ + +For more information on using an Azure Machine Learning Compute resource +in a virtual network, see +\href{https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-enable-virtual-network#use-a-machine-learning-compute-instance}{Secure Azure ML experimentation and inference jobs within an Azure Virtual Network}. +} + +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +compute_target <- create_aml_compute(ws, + cluster_name = 'mycluster', + vm_size = 'STANDARD_D2_V2', + max_nodes = 1) +wait_for_provisioning_completion(compute_target, show_output = TRUE) +} +} + +\seealso{ +\code{wait_for_provisioning_completion()} +} diff --git a/man/create_workspace.Rd b/man/create_workspace.Rd new file mode 100644 index 00000000..483e9156 --- /dev/null +++ b/man/create_workspace.Rd @@ -0,0 +1,103 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{create_workspace} +\alias{create_workspace} +\title{Create a new Azure Machine Learning workspace} +\usage{ +create_workspace(name, subscription_id = NULL, resource_group = NULL, + location = NULL, create_resource_group = TRUE, + friendly_name = NULL, storage_account = NULL, key_vault = NULL, + app_insights = NULL, container_registry = NULL, exist_ok = FALSE, + show_output = TRUE) +} +\arguments{ +\item{name}{A string of the new workspace name. The workspace name must be +between 2 and 32 characters and consist of letters and numbers.} + +\item{subscription_id}{A string of the subscription ID of the containing +subscription for the new workspace. The parameter is required if the user has +access to more than one subscription.} + +\item{resource_group}{A string of the Azure resource group that contains +the workspace. The parameter defaults to a mutation of the workspace name.} + +\item{location}{A string of the location of the workspace. The parameter +defaults to the resource group location. The location has to be a supported +region for Azure Machine Learning Services.} + +\item{create_resource_group}{If \code{TRUE}, the resource group will be created +if it doesn't exist.} + +\item{friendly_name}{A string of the friendly name for the workspace that +can be displayed in the UI.} + +\item{storage_account}{A string of an existing storage account in the Azure +resource ID format. The storage account will be used by the workspace to save run +outputs, code, logs, etc. If \code{NULL}, a new storage account will be created.} + +\item{key_vault}{A string of an existing key vault in the Azure resource ID +format. The key vault will be used by the workspace to store credentials +added to the workspace by the users. If \code{NULL}, a new key vault will be +created.} + +\item{app_insights}{A string of an existing Application Insights in the Azure +resource ID format. The Application Insights will be used by the workspace to +log webservices events. If \code{NULL}, a new Application Insights will be created.} + +\item{container_registry}{A string of an existing container registry in the +Azure resource ID format.
The container registry will be used by the +workspace to pull and push both experimentation and webservices images. If +\code{NULL}, a new container registry will be created.} + +\item{exist_ok}{If \code{TRUE}, the method will not fail if the workspace already +exists.} + +\item{show_output}{If \code{TRUE}, the method will print out incremental progress.} +} +\value{ +The \code{Workspace} object. +} +\description{ +Create a new Azure Machine Learning workspace. Throws an exception if the +workspace already exists or any of the workspace requirements are not +satisfied. When you create a new workspace, it automatically creates several +Azure resources that are used in the workspace: +\itemize{ +\item Azure Container Registry: Registers Docker containers that you use during +training and when you deploy a model. To minimize costs, ACR is +lazy-loaded until deployment images are created. +\item Azure Storage account: Used as the default datastore for the workspace. +\item Azure Application Insights: Stores monitoring information about your +models. +\item Azure Key Vault: Stores secrets that are used by compute targets and other +sensitive information that's needed by the workspace. +} +} +\section{Usage}{ + +The first example requires only minimal specification, and all dependent +resources as well as the resource group will be created automatically.\preformatted{ws <- create_workspace(name = 'myworkspace', + subscription_id = '', + resource_group = 'myresourcegroup', + location = 'eastus2') +} + +The following example shows how to reuse existing Azure resources by making +use of all parameters utilizing the Azure resource ID format. The specific +Azure resource IDs can be retrieved through the Azure Portal or SDK. This +assumes that the resource group, storage account, key vault, App Insights +and container registry already exist.\preformatted{ws <- create_workspace( + name = 'myworkspace', + subscription_id = '', + resource_group = 'myresourcegroup', + create_resource_group = FALSE, + location = 'eastus2', + friendly_name = 'My workspace', + storage_account = 'subscriptions//resourcegroups/myresourcegroup/providers/microsoft.storage/storageaccounts/mystorageaccount', + key_vault = 'subscriptions//resourcegroups/myresourcegroup/providers/microsoft.keyvault/vaults/mykeyvault', + app_insights = 'subscriptions//resourcegroups/myresourcegroup/providers/microsoft.insights/components/myappinsights', + container_registry = 'subscriptions//resourcegroups/myresourcegroup/providers/microsoft.containerregistry/registries/mycontainerregistry') +} +} + diff --git a/man/delete_compute.Rd b/man/delete_compute.Rd new file mode 100644 index 00000000..60fb528e --- /dev/null +++ b/man/delete_compute.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{delete_compute} +\alias{delete_compute} +\title{Delete a cluster} +\usage{ +delete_compute(cluster) +} +\arguments{ +\item{cluster}{The \code{AmlCompute} or \code{AksCompute} object.} +} +\description{ +Remove the compute object from its associated workspace and delete the +corresponding cloud-based resource.
+} +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +compute_target <- get_compute(ws, cluster_name = 'mycluster') +delete_compute(compute_target) +} +} + diff --git a/man/delete_local_webservice.Rd b/man/delete_local_webservice.Rd new file mode 100644 index 00000000..6b173023 --- /dev/null +++ b/man/delete_local_webservice.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-local.R +\name{delete_local_webservice} +\alias{delete_local_webservice} +\title{Delete this LocalWebservice from the local machine. +This function call is not asynchronous; it runs until the service is deleted.} +\usage{ +delete_local_webservice(webservice, delete_cache = TRUE, + delete_image = FALSE) +} +\arguments{ +\item{webservice}{The LocalWebservice object.} + +\item{delete_cache}{If \code{TRUE}, delete temporary files cached for the service.} + +\item{delete_image}{If \code{TRUE}, delete the service's Docker image.} +} +\description{ +Delete this LocalWebservice from the local machine. +This function call is not asynchronous; it runs until the service is deleted. +} diff --git a/man/delete_model.Rd b/man/delete_model.Rd new file mode 100644 index 00000000..7f1cba86 --- /dev/null +++ b/man/delete_model.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{delete_model} +\alias{delete_model} +\title{Delete this model from its associated workspace.} +\usage{ +delete_model(model) +} +\arguments{ +\item{model}{The model to delete.} +} +\description{ +Delete this model from its associated workspace. +} diff --git a/man/delete_secrets.Rd b/man/delete_secrets.Rd new file mode 100644 index 00000000..4f08e2b1 --- /dev/null +++ b/man/delete_secrets.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/keyvault.R +\name{delete_secrets} +\alias{delete_secrets} +\title{Delete secrets from a keyvault} +\usage{ +delete_secrets(keyvault, secrets) +} +\arguments{ +\item{keyvault}{The \code{Keyvault} object.} + +\item{secrets}{A vector of secret names.} +} +\description{ +Delete secrets from the keyvault associated with the workspace for +a specified set of secret names. +} diff --git a/man/delete_webservice.Rd b/man/delete_webservice.Rd new file mode 100644 index 00000000..1db07c4b --- /dev/null +++ b/man/delete_webservice.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{delete_webservice} +\alias{delete_webservice} +\title{Delete this Webservice from its associated workspace.} +\usage{ +delete_webservice(webservice) +} +\arguments{ +\item{webservice}{The webservice object.} +} +\description{ +Delete this Webservice from its associated workspace. +} diff --git a/man/delete_workspace.Rd b/man/delete_workspace.Rd new file mode 100644 index 00000000..ada96e52 --- /dev/null +++ b/man/delete_workspace.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{delete_workspace} +\alias{delete_workspace} +\title{Delete a workspace} +\usage{ +delete_workspace(workspace, delete_dependent_resources = FALSE, + no_wait = FALSE) +} +\arguments{ +\item{workspace}{The \code{Workspace} object of the workspace to delete.} + +\item{delete_dependent_resources}{If \code{TRUE}, the workspace's associated +resources, i.e.
ACR, storage account, key vault, and Application Insights +will also be deleted.} + +\item{no_wait}{If \code{TRUE}, do not wait for the workspace deletion to complete.} +} +\description{ +Delete the Azure Machine Learning workspace resource. \code{delete_workspace()} +can also delete the workspace's associated resources. +} diff --git a/man/deploy_model.Rd b/man/deploy_model.Rd new file mode 100644 index 00000000..091c1f12 --- /dev/null +++ b/man/deploy_model.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{deploy_model} +\alias{deploy_model} +\title{Deploy a Webservice from zero or more model objects.} +\usage{ +deploy_model(workspace, name, models, inference_config, + deployment_config = NULL, deployment_target = NULL) +} +\arguments{ +\item{workspace}{A Workspace object to associate the Webservice with.} + +\item{name}{The name to give the deployed service. Must be unique to the +workspace, only consist of lowercase letters, numbers, or dashes, start with +a letter, and be between 3 and 32 characters long.} + +\item{models}{A list of model objects. Can be an empty list.} + +\item{inference_config}{An InferenceConfig object used to determine required +model properties.} + +\item{deployment_config}{A WebserviceDeploymentConfiguration used to +configure the webservice. If one is not provided, an empty configuration +object will be used based on the desired target.} + +\item{deployment_target}{An azureml.core.ComputeTarget to deploy the +Webservice to. As Azure Container Instances has no associated +azureml.core.ComputeTarget, leave this parameter as NULL to deploy to Azure +Container Instances.} +} +\value{ +A Webservice object corresponding to the deployed webservice. +} +\description{ +Deploy a Webservice from zero or more model objects. +} diff --git a/man/deserialize_to_model.Rd b/man/deserialize_to_model.Rd new file mode 100644 index 00000000..14057882 --- /dev/null +++ b/man/deserialize_to_model.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{deserialize_to_model} +\alias{deserialize_to_model} +\title{Convert a JSON object into a Model object.} +\usage{ +deserialize_to_model(workspace, model_payload) +} +\arguments{ +\item{workspace}{The workspace object the model is registered under.} + +\item{model_payload}{A JSON object to convert to a Model object.} +} +\value{ +The Model representation of the provided JSON object. +} +\description{ +Convert a JSON object into a Model object. +} diff --git a/man/deserialize_to_webservice.Rd b/man/deserialize_to_webservice.Rd new file mode 100644 index 00000000..df9fe3c8 --- /dev/null +++ b/man/deserialize_to_webservice.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{deserialize_to_webservice} +\alias{deserialize_to_webservice} +\title{Convert a JSON object into a Webservice object.} +\usage{ +deserialize_to_webservice(workspace, webservice_payload) +} +\arguments{ +\item{workspace}{The workspace object the Webservice is registered under.} + +\item{webservice_payload}{A JSON object to convert to a Webservice object.} +} +\value{ +The Webservice representation of the provided JSON object. +} +\description{ +Convert a JSON object into a Webservice object.
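+As a usage sketch for \code{deploy_model()} documented above (ws, model,
+inference_config, and deployment_config are assumed to already exist;
+the service name is a placeholder):\preformatted{service <- deploy_model(ws,
+                        name = 'my-service',
+                        models = list(model),
+                        inference_config = inference_config,
+                        deployment_config = deployment_config)
+}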
+} diff --git a/man/detach_aks_compute.Rd b/man/detach_aks_compute.Rd new file mode 100644 index 00000000..25f8f3dd --- /dev/null +++ b/man/detach_aks_compute.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{detach_aks_compute} +\alias{detach_aks_compute} +\title{Detach an AksCompute cluster from its associated workspace} +\usage{ +detach_aks_compute(cluster) +} +\arguments{ +\item{cluster}{The \code{AksCompute} object.} +} +\description{ +Detach the AksCompute cluster from its associated workspace. No +underlying cloud resource will be deleted; the association will +just be removed. +} diff --git a/man/download_file_from_run.Rd b/man/download_file_from_run.Rd new file mode 100644 index 00000000..4cbe835e --- /dev/null +++ b/man/download_file_from_run.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{download_file_from_run} +\alias{download_file_from_run} +\title{Download an associated file from storage.} +\usage{ +download_file_from_run(run, name, output_file_path = NULL) +} +\arguments{ +\item{run}{the run object} + +\item{name}{The name of the artifact to be downloaded} + +\item{output_file_path}{The local path where to store the artifact} +} +\description{ +Download an associated file from storage. +} diff --git a/man/download_files_from_run.Rd b/man/download_files_from_run.Rd new file mode 100644 index 00000000..08131af3 --- /dev/null +++ b/man/download_files_from_run.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{download_files_from_run} +\alias{download_files_from_run} +\title{Download files from a given storage prefix (folder name) or +the entire container if prefix is unspecified.} +\usage{ +download_files_from_run(run, prefix = NULL, output_directory = NULL, + output_paths = NULL, batch_size = 100L) +} +\arguments{ +\item{run}{the run object} + +\item{prefix}{the filepath prefix within the container from +which to download all artifacts} + +\item{output_directory}{optional directory that all artifact paths use +as a prefix} + +\item{output_paths}{optional filepaths in which to store the downloaded +artifacts. Should be unique and match length of paths.} + +\item{batch_size}{number of files to download per batch} +} +\description{ +Download files from a given storage prefix (folder name) or +the entire container if prefix is unspecified. +} diff --git a/man/download_from_datastore.Rd b/man/download_from_datastore.Rd new file mode 100644 index 00000000..3d93da24 --- /dev/null +++ b/man/download_from_datastore.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{download_from_datastore} +\alias{download_from_datastore} +\title{Download data from a datastore to the local file system} +\usage{ +download_from_datastore(datastore, target_path, prefix = NULL, + overwrite = FALSE, show_progress = TRUE) +} +\arguments{ +\item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} + +\item{target_path}{A string of the local directory to download the file to.} + +\item{prefix}{A string of the path to the folder in the blob container +or file store to download. 
If \code{NULL}, will download everything in the blob +container or file share.} + +\item{overwrite}{If \code{TRUE}, overwrites any existing data at \code{target_path}.} + +\item{show_progress}{If \code{TRUE}, show progress of the download in the console.} +} +\value{ +An integer of the number of files successfully downloaded. +} +\description{ +Download data from the datastore to the local file system. +} diff --git a/man/download_model.Rd b/man/download_model.Rd new file mode 100644 index 00000000..8011af60 --- /dev/null +++ b/man/download_model.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{download_model} +\alias{download_model} +\title{Download model to target_dir of local file system.} +\usage{ +download_model(model, target_dir = ".", exist_ok = FALSE) +} +\arguments{ +\item{model}{The model to download.} + +\item{target_dir}{Path to the directory to download the model to. +Defaults to ".".} + +\item{exist_ok}{If \code{TRUE}, replace the downloaded directory/files if they exist. +Defaults to FALSE.} +} +\value{ +A string of the path to the file or folder of the model. +} +\description{ +Download model to target_dir of local file system. +} diff --git a/man/estimator.Rd b/man/estimator.Rd new file mode 100644 index 00000000..bd371e49 --- /dev/null +++ b/man/estimator.Rd @@ -0,0 +1,94 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/estimator.R +\name{estimator} +\alias{estimator} +\title{Create an estimator} +\usage{ +estimator(source_directory, compute_target = NULL, vm_size = NULL, + vm_priority = NULL, entry_script = NULL, script_params = NULL, + cran_packages = NULL, github_packages = NULL, + custom_url_packages = NULL, custom_docker_image = NULL, + image_registry_details = NULL, use_gpu = FALSE, + environment_variables = NULL, shm_size = NULL, + max_run_duration_seconds = NULL, environment = NULL) +} +\arguments{ +\item{source_directory}{A string of the local directory containing +experiment configuration and code files needed for the training job.} + +\item{compute_target}{The \code{AmlCompute} object for the compute target +where training will happen.} + +\item{vm_size}{A string of the VM size of the compute target that will be +created for the training job. The available VM sizes +are listed \href{https://docs.microsoft.com/azure/cloud-services/cloud-services-sizes-specs}{here}. +Provide this parameter if you want to create AmlCompute as the compute target +at run time, instead of providing an existing cluster to the \code{compute_target} +parameter. If \code{vm_size} is specified, a single-node cluster is automatically +created for your run and is deleted automatically once the run completes.} + +\item{vm_priority}{A string of either \code{'dedicated'} or \code{'lowpriority'} to +specify the VM priority of the compute target that will be created for the +training job. Defaults to \code{'dedicated'}.
This takes effect only when the +\code{vm_size} parameter is specified.} + +\item{entry_script}{A string representing the relative path to the file used +to start training.} + +\item{script_params}{A named list of the command-line arguments to pass to +the training script specified in \code{entry_script}.} + +\item{cran_packages}{A character vector of CRAN packages to be installed.} + +\item{github_packages}{A character vector of GitHub packages to be installed.} + +\item{custom_url_packages}{A character vector of packages to be installed +from local directory or custom URL.} + +\item{custom_docker_image}{A string of the name of the Docker image from +which the image to use for training will be built. If not set, a default +CPU-based image will be used as the base image. To use an image from a +private Docker repository, you will also have to specify the +\code{image_registry_details} parameter.} + +\item{image_registry_details}{A \code{ContainerRegistry} object of the details of +the Docker image registry for the custom Docker image.} + +\item{use_gpu}{Indicates whether the environment to run the experiment should +support GPUs. If \code{TRUE}, a GPU-based default Docker image will be used in the +environment. If \code{FALSE}, a CPU-based image will be used. Default Docker +images (CPU or GPU) will only be used if the \code{custom_docker_image} parameter +is not set.} + +\item{environment_variables}{A named list of environment variables names +and values. These environment variables are set on the process where the user +script is being executed.} + +\item{shm_size}{A string for the size of the Docker container's shared +memory block. For more information, see +\href{https://docs.docker.com/engine/reference/run/}{Docker run reference}. +If not set, a default value of \code{'2g'} is used.} + +\item{max_run_duration_seconds}{An integer of the maximum allowed time for +the run. Azure ML will attempt to automatically cancel the run if it takes +longer than this value.} + +\item{environment}{The \code{Environment} object that configures the R +environment where the experiment is executed. This parameter is mutually +exclusive with the other environment-related parameters \code{custom_docker_image} +, \code{image_registry_details}, \code{use_gpu}, \code{environment_variables}, \code{shm_size}, +\code{cran_packages}, \code{github_packages}, and \code{custom_url_packages} and if set +will take precedence over those parameters.} +} +\value{ +The \code{Estimator} object. +} +\description{ +An Estimator wraps run configuration information for specifying details +of executing an R script. Running an Estimator experiment +(using \code{submit_experiment()}) will return a \code{ScriptRun} object and +execute your training script on the specified compute target. +} +\seealso{ +\code{r_environment()}, \code{container_registry()}, \code{submit_experiment()} +} diff --git a/man/experiment.Rd b/man/experiment.Rd new file mode 100644 index 00000000..15259e77 --- /dev/null +++ b/man/experiment.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiment.R +\name{experiment} +\alias{experiment} +\title{Create an Azure Machine Learning experiment} +\usage{ +experiment(workspace, name) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{name}{A string of the experiment name. 
The name must be between +3 and 36 characters, start with a letter or number, and can only contain +letters, numbers, underscores, and dashes.} +} +\value{ +The \code{Experiment} object. +} +\description{ +An experiment is a grouping of many runs from a specified script. +} +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +exp <- experiment(ws, name = 'myexperiment') +} +} + +\seealso{ +\code{submit_experiment()} +} diff --git a/man/generate_docker_file.Rd b/man/generate_docker_file.Rd new file mode 100644 index 00000000..d7ae0475 --- /dev/null +++ b/man/generate_docker_file.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environment.R +\name{generate_docker_file} +\alias{generate_docker_file} +\title{Generate a Dockerfile string to build the image for training.} +\usage{ +generate_docker_file(custom_docker_image = NULL, cran_packages = NULL, + github_packages = NULL, custom_url_packages = NULL) +} +\arguments{ +\item{custom_docker_image}{The name of the Docker image from which the image +to use for training will be built. If not set, a default CPU-based image will +be used as the base image.} + +\item{cran_packages}{A character vector of CRAN packages to be installed.} + +\item{github_packages}{A character vector of GitHub packages to be installed.} + +\item{custom_url_packages}{A character vector of packages to be installed from +a local directory or custom URL.} +} +\description{ +Generate a Dockerfile string to build the image for training. +} diff --git a/man/generate_new_webservice_key.Rd b/man/generate_new_webservice_key.Rd new file mode 100644 index 00000000..50acd6d5 --- /dev/null +++ b/man/generate_new_webservice_key.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{generate_new_webservice_key} +\alias{generate_new_webservice_key} +\title{Regenerate one of the Webservice's keys. Must specify either 'Primary' or +'Secondary' key.} +\usage{ +generate_new_webservice_key(webservice, key_type) +} +\arguments{ +\item{webservice}{The webservice object.} + +\item{key_type}{Which key to regenerate. Options are 'Primary' or +'Secondary'.} +} +\description{ +Regenerate one of the Webservice's keys. Must specify either 'Primary' or +'Secondary' key.
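+A minimal sketch, assuming service is a deployed Webservice object:\preformatted{generate_new_webservice_key(service, key_type = 'Primary')
+}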
+} diff --git a/man/generate_score_python_wrapper.Rd b/man/generate_score_python_wrapper.Rd new file mode 100644 index 00000000..aea9777b --- /dev/null +++ b/man/generate_score_python_wrapper.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{generate_score_python_wrapper} +\alias{generate_score_python_wrapper} +\title{Generate the _generated_score.py file for the corresponding entry_script file} +\usage{ +generate_score_python_wrapper(entry_script, source_directory) +} +\arguments{ +\item{entry_script}{Path to the local file that contains the code to run for +the image.} + +\item{source_directory}{Path to the folder that contains all the files needed to +create the image.} +} +\description{ +Generate the _generated_score.py file for the corresponding entry_script file. +} diff --git a/man/get_aks_compute_credentials.Rd b/man/get_aks_compute_credentials.Rd new file mode 100644 index 00000000..12450d6d --- /dev/null +++ b/man/get_aks_compute_credentials.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{get_aks_compute_credentials} +\alias{get_aks_compute_credentials} +\title{Get the credentials for an AksCompute cluster} +\usage{ +get_aks_compute_credentials(cluster) +} +\arguments{ +\item{cluster}{The \code{AksCompute} object.} +} +\value{ +A named list of the cluster credentials. +} +\description{ +Retrieves the credentials for an AksCompute cluster. +} diff --git a/man/get_best_run_by_primary_metric.Rd b/man/get_best_run_by_primary_metric.Rd new file mode 100644 index 00000000..cf379816 --- /dev/null +++ b/man/get_best_run_by_primary_metric.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{get_best_run_by_primary_metric} +\alias{get_best_run_by_primary_metric} +\title{Return the best performing run amongst all completed runs} +\usage{ +get_best_run_by_primary_metric(hyperdrive_run, include_failed = TRUE, + include_canceled = TRUE) +} +\arguments{ +\item{hyperdrive_run}{The \code{HyperDriveRun} object.} + +\item{include_failed}{If \code{TRUE}, include the failed runs.} + +\item{include_canceled}{If \code{TRUE}, include the canceled runs.} +} +\value{ +The \code{Run} object. +} +\description{ +Find and return the run that corresponds to the best performing run +amongst all the completed runs. + +The best performing run is identified solely based on the primary metric +parameter specified in the \code{HyperDriveConfig} (\code{primary_metric_name}). +The \code{PrimaryMetricGoal} governs whether the minimum or maximum of the +primary metric is used. To do a more detailed analysis of all the +run metrics launched by this HyperDrive run, use \code{get_child_run_metrics()}. +Only one of the runs is returned from \code{get_best_run_by_primary_metric()}, +even if several of the runs launched by this HyperDrive run reached +the same best metric.
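+A minimal sketch, assuming hyperdrive_run is a completed HyperDrive run
+returned by \code{submit_experiment()}:\preformatted{best_run <- get_best_run_by_primary_metric(hyperdrive_run)
+best_metrics <- get_run_metrics(best_run)
+}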
+} diff --git a/man/get_child_run_hyperparameters.Rd b/man/get_child_run_hyperparameters.Rd new file mode 100644 index 00000000..d34b6720 --- /dev/null +++ b/man/get_child_run_hyperparameters.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{get_child_run_hyperparameters} +\alias{get_child_run_hyperparameters} +\title{Get the hyperparameters for all child runs} +\usage{ +get_child_run_hyperparameters(hyperdrive_run) +} +\arguments{ +\item{hyperdrive_run}{The \code{HyperDriveRun} object.} +} +\value{ +The named list of hyperparameters where element name +is the run_id, e.g. \code{list("run_id" = hyperparameters)}. +} +\description{ +Return the hyperparameters for all the child runs of the +HyperDrive run. +} diff --git a/man/get_child_run_metrics.Rd b/man/get_child_run_metrics.Rd new file mode 100644 index 00000000..92015740 --- /dev/null +++ b/man/get_child_run_metrics.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{get_child_run_metrics} +\alias{get_child_run_metrics} +\title{Get the metrics from all child runs} +\usage{ +get_child_run_metrics(hyperdrive_run) +} +\arguments{ +\item{hyperdrive_run}{The \code{HyperDriveRun} object.} +} +\value{ +The named list of metrics where element name is +the run_id, e.g. \code{list("run_id" = metrics)}. +} +\description{ +Return the metrics from all the child runs of the +HyperDrive run. +} diff --git a/man/get_child_runs_sorted_by_primary_metric.Rd b/man/get_child_runs_sorted_by_primary_metric.Rd new file mode 100644 index 00000000..2d6ec7bd --- /dev/null +++ b/man/get_child_runs_sorted_by_primary_metric.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{get_child_runs_sorted_by_primary_metric} +\alias{get_child_runs_sorted_by_primary_metric} +\title{Get the child runs sorted in descending order by +best primary metric} +\usage{ +get_child_runs_sorted_by_primary_metric(hyperdrive_run, top = 0L, + reverse = FALSE, discard_no_metric = FALSE) +} +\arguments{ +\item{hyperdrive_run}{The \code{HyperDriveRun} object.} + +\item{top}{An integer of the number of top child runs to be returned. If \code{0} +(the default value), all child runs will be returned.} + +\item{reverse}{If \code{TRUE}, the order will be reversed. This sorting only +impacts child runs with the primary metric.} + +\item{discard_no_metric}{If \code{FALSE}, child runs without the primary metric +will be appended to the list returned.} +} +\value{ +The named list of child runs. +} +\description{ +Return a list of child runs of the HyperDrive run sorted by their best +primary metric. The sorting is done according to the primary metric and +its goal: if it is maximize, then the child runs are returned in descending +order of their best primary metric. If \code{reverse = TRUE}, the order is +reversed. Each child in the result has run id, hyperparameters, best primary +metric value, and status. + +Child runs without the primary metric are discarded when +\code{discard_no_metric = TRUE}. Otherwise, they are appended to the list behind +other child runs with the primary metric. Note that the reverse option has no +impact on them. 
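+A minimal sketch, assuming hyperdrive_run is a completed HyperDrive run:\preformatted{top_children <- get_child_runs_sorted_by_primary_metric(hyperdrive_run,
+                                                        top = 5L)
+}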
+} diff --git a/man/get_compute.Rd b/man/get_compute.Rd new file mode 100644 index 00000000..1a5ff010 --- /dev/null +++ b/man/get_compute.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{get_compute} +\alias{get_compute} +\title{Get an existing compute cluster} +\usage{ +get_compute(workspace, cluster_name) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{cluster_name}{A string of the name of the cluster.} +} +\value{ +The \code{AmlCompute} or \code{AksCompute} object. +} +\description{ +Returns an \code{AmlCompute} or \code{AksCompute} object for an existing compute +resource. If the compute target doesn't exist, the function will return +\code{NULL}. +} +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +compute_target <- get_compute(ws, cluster_name = 'mycluster') +} +} + diff --git a/man/get_current_run.Rd b/man/get_current_run.Rd new file mode 100644 index 00000000..b6a45259 --- /dev/null +++ b/man/get_current_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_current_run} +\alias{get_current_run} +\title{Gets the context object for a run} +\usage{ +get_current_run(allow_offline = TRUE) +} +\arguments{ +\item{allow_offline}{Allow the service context to fall back to offline mode +so that the training script can be tested locally without submitting a job +with the SDK.} +} +\value{ +The run object. +} +\description{ +Gets the context object for a run +} diff --git a/man/get_datastore.Rd b/man/get_datastore.Rd new file mode 100644 index 00000000..9fc76c1a --- /dev/null +++ b/man/get_datastore.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{get_datastore} +\alias{get_datastore} +\title{Get an existing datastore} +\usage{ +get_datastore(workspace, datastore_name) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{datastore_name}{A string of the name of the datastore.} +} +\value{ +The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object. +} +\description{ +Get the corresponding datastore object for an existing +datastore by name from the given workspace. +} diff --git a/man/get_default_datastore.Rd b/man/get_default_datastore.Rd new file mode 100644 index 00000000..31619c8e --- /dev/null +++ b/man/get_default_datastore.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_default_datastore} +\alias{get_default_datastore} +\title{Get the default datastore for a workspace} +\usage{ +get_default_datastore(workspace) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} +} +\value{ +The default \code{Datastore} object. +} +\description{ +Returns the default datastore associated with the workspace. + +When you create a workspace, an Azure blob container and Azure file share +are registered to the workspace with the names \code{workspaceblobstore} and +\code{workspacefilestore}, respectively. They store the connection information +of the blob container and the file share that is provisioned in the storage +account attached to the workspace. The \code{workspaceblobstore} is set as the +default datastore, and remains the default datastore unless you set a new +datastore as the default with \code{set_default_datastore()}. 
+} +\section{Examples}{ + +Get the default datastore for the workspace:\preformatted{ws <- load_workspace_from_config() +ds <- get_default_datastore(ws) +} + +If you have not changed the default datastore for the workspace, the +following code will return the same datastore object as the above +example:\preformatted{ws <- load_workspace_from_config() +ds <- get_datastore(ws, datastore_name = 'workspaceblobstore') +} +} + diff --git a/man/get_default_keyvault.Rd b/man/get_default_keyvault.Rd new file mode 100644 index 00000000..07a8fc43 --- /dev/null +++ b/man/get_default_keyvault.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_default_keyvault} +\alias{get_default_keyvault} +\title{Get the default keyvault for a workspace} +\usage{ +get_default_keyvault(workspace) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} +} +\value{ +The \code{Keyvault} object. +} +\description{ +Returns a \code{Keyvault} object representing the default +\href{https://docs.microsoft.com/en-us/azure/key-vault/key-vault-overview}{Azure Key Vault} +associated with the workspace. +} +\seealso{ +\code{set_secrets()}, \code{get_secrets()}, \code{list_secrets()}, \code{delete_secrets()} +} diff --git a/man/get_environment.Rd b/man/get_environment.Rd new file mode 100644 index 00000000..a79dc532 --- /dev/null +++ b/man/get_environment.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environment.R +\name{get_environment} +\alias{get_environment} +\title{Get an existing environment} +\usage{ +get_environment(workspace, name, version = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{name}{A string of the name of the environment.} + +\item{version}{A string of the version of the environment.} +} +\value{ +The \code{Environment} object. +} +\description{ +Returns an \code{Environment} object for an existing environment in +the workspace. +} +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +env <- get_environment(ws, name = 'myenv', version = '1') +} +} + diff --git a/man/get_model.Rd b/man/get_model.Rd new file mode 100644 index 00000000..c2b9c432 --- /dev/null +++ b/man/get_model.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{get_model} +\alias{get_model} +\title{Retrieve the Model object from the cloud.} +\usage{ +get_model(workspace, name = NULL, id = NULL, tags = NULL, + properties = NULL, version = NULL, run_id = NULL) +} +\arguments{ +\item{workspace}{The workspace object containing the Model to retrieve.} + +\item{name}{Will retrieve the latest model with the corresponding name, if +it exists.} + +\item{id}{Will retrieve the model with the corresponding ID, if it exists.} + +\item{tags}{Optional, will filter based on the provided list, searching by +either 'key' or '[key, value]'.} + +\item{properties}{Optional, will filter based on the provided list, +searching by either 'key' or '[key, value]'.} + +\item{version}{When provided along with name, will get the specific version +of the specified named model, if it exists.} + +\item{run_id}{Optional, will filter based on the provided ID.} +} +\value{ +A model object, if one is found in the provided workspace. +} +\description{ +Retrieve the Model object from the cloud.
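+A minimal sketch ('mymodel' is a placeholder for a registered model name):\preformatted{ws <- load_workspace_from_config()
+model <- get_model(ws, name = 'mymodel')
+}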
diff --git a/man/get_model_package_container_registry.Rd b/man/get_model_package_container_registry.Rd new file mode 100644 index 00000000..8155fd9f --- /dev/null +++ b/man/get_model_package_container_registry.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{get_model_package_container_registry} +\alias{get_model_package_container_registry} +\title{Return a ContainerRegistry object for where the image +(or base image, for Dockerfile packages) is stored.} +\usage{ +get_model_package_container_registry(package) +} +\arguments{ +\item{package}{Package created with model(s) and dependencies.} +} +\value{ +The ContainerRegistry object. +} +\description{ +Return a ContainerRegistry object for where the image +(or base image, for Dockerfile packages) is stored. +} diff --git a/man/get_model_package_creation_logs.Rd b/man/get_model_package_creation_logs.Rd new file mode 100644 index 00000000..3e4db265 --- /dev/null +++ b/man/get_model_package_creation_logs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{get_model_package_creation_logs} +\alias{get_model_package_creation_logs} +\title{Retrieve the package creation logs.} +\usage{ +get_model_package_creation_logs(package, decode = TRUE, offset = 0) +} +\arguments{ +\item{package}{Package created with model(s) and dependencies.} + +\item{decode}{Whether to decode the raw log bytes to a string.} + +\item{offset}{Byte offset from which to start reading the logs.} +} +\value{ +Package creation logs. +} +\description{ +Retrieve the package creation logs. +} diff --git a/man/get_run.Rd b/man/get_run.Rd new file mode 100644 index 00000000..c72f0eb0 --- /dev/null +++ b/man/get_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_run} +\alias{get_run} +\title{Get the Run object from a given run ID} +\usage{ +get_run(experiment, run_id) +} +\arguments{ +\item{experiment}{The containing experiment.} + +\item{run_id}{The run ID for the run.} +} +\value{ +The run object. +} +\description{ +Get the Run object from a given run ID. +} diff --git a/man/get_run_details.Rd b/man/get_run_details.Rd new file mode 100644 index 00000000..91348508 --- /dev/null +++ b/man/get_run_details.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_run_details} +\alias{get_run_details} +\title{Get the definition, status information, current log files and other details +of the run.} +\usage{ +get_run_details(run) +} +\arguments{ +\item{run}{The run object.} +} +\value{ +The details for the run. +} +\description{ +Get the definition, status information, current log files and other details +of the run. +} diff --git a/man/get_run_details_with_logs.Rd b/man/get_run_details_with_logs.Rd new file mode 100644 index 00000000..8950d869 --- /dev/null +++ b/man/get_run_details_with_logs.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_run_details_with_logs} +\alias{get_run_details_with_logs} +\title{Return run status including log file content.} +\usage{ +get_run_details_with_logs(run) +} +\arguments{ +\item{run}{The run object.} +} +\value{ +The status for the run, with log file contents. +} +\description{ +Return run status including log file content. 
diff --git a/man/get_run_file_names.Rd b/man/get_run_file_names.Rd new file mode 100644 index 00000000..e65a342b --- /dev/null +++ b/man/get_run_file_names.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_run_file_names} +\alias{get_run_file_names} +\title{List the files that are stored in association with the run.} +\usage{ +get_run_file_names(run) +} +\arguments{ +\item{run}{The run object.} +} +\value{ +The list of paths for existing artifacts. +} +\description{ +List the files that are stored in association with the run. +} diff --git a/man/get_run_metrics.Rd b/man/get_run_metrics.Rd new file mode 100644 index 00000000..5d62be67 --- /dev/null +++ b/man/get_run_metrics.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_run_metrics} +\alias{get_run_metrics} +\title{Get the metrics for a run} +\usage{ +get_run_metrics(run) +} +\arguments{ +\item{run}{The run object.} +} +\value{ +A named list containing the metrics associated with the run. +} +\description{ +Get the metrics for a run. +} diff --git a/man/get_runs_in_experiment.Rd b/man/get_runs_in_experiment.Rd new file mode 100644 index 00000000..f15da699 --- /dev/null +++ b/man/get_runs_in_experiment.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiment.R +\name{get_runs_in_experiment} +\alias{get_runs_in_experiment} +\title{Return a generator of the runs for an experiment} +\usage{ +get_runs_in_experiment(experiment, type = NULL, tags = NULL, + properties = NULL, include_children = FALSE) +} +\arguments{ +\item{experiment}{The \code{Experiment} object.} + +\item{type}{Filter the returned generator of runs by the provided type.} + +\item{tags}{Filter runs by tags. A named list, e.g. \code{list("tag" = "value")}.} + +\item{properties}{Filter runs by properties. A named list, +e.g. \code{list("property" = "value")}.} + +\item{include_children}{By default, fetch only top-level runs. +Set to TRUE to list all runs.} +} +\value{ +The list of runs matching supplied filters. +} +\description{ +Return a generator of the runs for an experiment, in reverse +chronological order. +} diff --git a/man/get_secrets.Rd b/man/get_secrets.Rd new file mode 100644 index 00000000..7966472f --- /dev/null +++ b/man/get_secrets.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/keyvault.R +\name{get_secrets} +\alias{get_secrets} +\title{Get secrets from a keyvault} +\usage{ +get_secrets(keyvault, secrets) +} +\arguments{ +\item{keyvault}{The \code{Keyvault} object.} + +\item{secrets}{A vector of secret names.} +} +\value{ +A named list of found and not found secrets, where element +name corresponds to the secret name. If a secret was not found, the +corresponding element will be \code{NULL}. +} +\description{ +Returns the secret values from the keyvault associated with the +workspace for a given set of secret names. For runs submitted using +\code{submit_experiment()}, you can use \code{get_secrets_from_run()} instead, +as that method shortcuts workspace instantiation (since a submitted +run is aware of its workspace). 
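+} +\section{Examples}{ +A sketch, assuming secrets named \code{'mysecret1'} and \code{'mysecret2'} were +previously set in the workspace keyvault:\preformatted{ws <- load_workspace_from_config() +keyvault <- get_default_keyvault(ws) +secret_values <- get_secrets(keyvault, secrets = c('mysecret1', 'mysecret2')) +} +} + 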
diff --git a/man/get_secrets_from_run.Rd b/man/get_secrets_from_run.Rd new file mode 100644 index 00000000..2dd897cc --- /dev/null +++ b/man/get_secrets_from_run.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{get_secrets_from_run} +\alias{get_secrets_from_run} +\title{Get the secret values for a given list of secret names. +Get a dictionary of found and not found secrets for the list of names +provided.} +\usage{ +get_secrets_from_run(run, secrets) +} +\arguments{ +\item{run}{The run object.} + +\item{secrets}{List of secret names to retrieve the values for.} +} +\value{ +Returns a dictionary of found and not found secrets. +} +\description{ +Get the secret values for a given list of secret names. +Get a dictionary of found and not found secrets for the list of names +provided. +} diff --git a/man/get_webservice.Rd b/man/get_webservice.Rd new file mode 100644 index 00000000..fe4572bb --- /dev/null +++ b/man/get_webservice.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{get_webservice} +\alias{get_webservice} +\title{Retrieve a cloud representation of a Webservice object associated with the +provided workspace. Will return an instance of a child class corresponding to +the specific type of the retrieved Webservice object.} +\usage{ +get_webservice(workspace, name) +} +\arguments{ +\item{workspace}{The workspace object containing the Webservice object to +retrieve.} + +\item{name}{The name of the Webservice object to retrieve.} +} +\value{ +The webservice object. +} +\description{ +Retrieve a cloud representation of a Webservice object associated with the +provided workspace. Will return an instance of a child class corresponding to +the specific type of the retrieved Webservice object. +} diff --git a/man/get_webservice_keys.Rd b/man/get_webservice_keys.Rd new file mode 100644 index 00000000..756a03a6 --- /dev/null +++ b/man/get_webservice_keys.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{get_webservice_keys} +\alias{get_webservice_keys} +\title{Retrieve auth keys for this Webservice.} +\usage{ +get_webservice_keys(webservice) +} +\arguments{ +\item{webservice}{The webservice object.} +} +\value{ +The auth keys for this Webservice. +} +\description{ +Retrieve auth keys for this Webservice. +} diff --git a/man/get_webservice_logs.Rd b/man/get_webservice_logs.Rd new file mode 100644 index 00000000..db4a765e --- /dev/null +++ b/man/get_webservice_logs.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{get_webservice_logs} +\alias{get_webservice_logs} +\title{Retrieve logs for the Webservice.} +\usage{ +get_webservice_logs(webservice, num_lines = 5000L) +} +\arguments{ +\item{webservice}{The webservice object.} + +\item{num_lines}{The maximum number of log lines to retrieve.} +} +\value{ +The logs for this Webservice. +} +\description{ +Retrieve logs for the Webservice. 
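+} +\section{Examples}{ +A sketch of retrieving logs from a deployed service (the service name +\code{'myservice'} is a placeholder):\preformatted{ws <- load_workspace_from_config() +service <- get_webservice(ws, name = 'myservice') +logs <- get_webservice_logs(service) +} +} + 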
diff --git a/man/get_webservice_token.Rd b/man/get_webservice_token.Rd new file mode 100644 index 00000000..06472224 --- /dev/null +++ b/man/get_webservice_token.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{get_webservice_token} +\alias{get_webservice_token} +\title{Retrieve auth token for this Webservice, scoped to the current user.} +\usage{ +get_webservice_token(webservice) +} +\arguments{ +\item{webservice}{The webservice object.} +} +\value{ +The auth token for this Webservice and when it should be +refreshed after. +} +\description{ +Retrieve auth token for this Webservice, scoped to the current user. +} diff --git a/man/get_workspace.Rd b/man/get_workspace.Rd new file mode 100644 index 00000000..3d99ebd7 --- /dev/null +++ b/man/get_workspace.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_workspace} +\alias{get_workspace} +\title{Get an existing workspace} +\usage{ +get_workspace(name, subscription_id = NULL, resource_group = NULL) +} +\arguments{ +\item{name}{A string of the workspace name to get.} + +\item{subscription_id}{A string of the subscription ID to use. The parameter +is required if the user has access to more than one subscription.} + +\item{resource_group}{A string of the resource group to use. If \code{NULL}, the +method will search all resource groups in the subscription.} +} +\value{ +The \code{Workspace} object. +} +\description{ +Returns a \code{Workspace} object for an existing Azure Machine Learning +workspace. Throws an exception if the workspace doesn't exist or the +required fields don't lead to a uniquely identifiable workspace. +} diff --git a/man/get_workspace_details.Rd b/man/get_workspace_details.Rd new file mode 100644 index 00000000..54dd8cd9 --- /dev/null +++ b/man/get_workspace_details.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_workspace_details} +\alias{get_workspace_details} +\title{Get the details of a workspace} +\usage{ +get_workspace_details(workspace) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} +} +\value{ +A named list of the workspace details. +} +\description{ +Returns the details of the workspace. +} +\section{Details}{ + +The returned list contains the following named elements: +\itemize{ +\item \emph{id}: URI pointing to the workspace resource, containing subscription ID, +resource group, and workspace name. +\item \emph{name}: Workspace name. +\item \emph{location}: Workspace region. +\item \emph{type}: URI of the format \code{"{providerName}/workspaces"}. +\item \emph{workspaceid}: Workspace ID. +\item \emph{description}: Workspace description. +\item \emph{friendlyName}: Workspace friendly name. +\item \emph{creationTime}: Time the workspace was created, in ISO8601. +\item \emph{containerRegistry}: Workspace container registry. +\item \emph{keyVault}: Workspace key vault. +\item \emph{applicationInsights}: Workspace App Insights. +\item \emph{identityPrincipalId}: Workspace identity principal ID. +\item \emph{identityTenantId}: Workspace tenant ID. +\item \emph{identityType}: Workspace identity type. +\item \emph{storageAccount}: Workspace storage account. 
+} +} + diff --git a/man/grid_parameter_sampling.Rd b/man/grid_parameter_sampling.Rd new file mode 100644 index 00000000..73cbee21 --- /dev/null +++ b/man/grid_parameter_sampling.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{grid_parameter_sampling} +\alias{grid_parameter_sampling} +\title{Define grid sampling over a hyperparameter search space} +\usage{ +grid_parameter_sampling(parameter_space) +} +\arguments{ +\item{parameter_space}{A named list containing each parameter and its +distribution, e.g. \code{list("parameter" = distribution)}.} +} +\value{ +The \code{GridParameterSampling} object. +} +\description{ +Grid sampling performs a simple grid search over all feasible values in +the defined search space. It can only be used with hyperparameters +specified using \code{choice()}. +} +\section{Examples}{ +\preformatted{param_sampling <- grid_parameter_sampling( + list("num_hidden_layers" = choice(c(1, 2, 3)), + "batch_size" = choice(c(16, 32)))) +} +} + +\seealso{ +\code{choice()} +} diff --git a/man/hyperdrive_config.Rd b/man/hyperdrive_config.Rd new file mode 100644 index 00000000..5508c6a7 --- /dev/null +++ b/man/hyperdrive_config.Rd @@ -0,0 +1,100 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{hyperdrive_config} +\alias{hyperdrive_config} +\title{Create a configuration for a HyperDrive run} +\usage{ +hyperdrive_config(hyperparameter_sampling, primary_metric_name, + primary_metric_goal, max_total_runs, max_concurrent_runs = NULL, + max_duration_minutes = 10080L, policy = NULL, estimator = NULL) +} +\arguments{ +\item{hyperparameter_sampling}{The hyperparameter sampling space. +Can be a \code{RandomParameterSampling}, \code{GridParameterSampling}, or +\code{BayesianParameterSampling} object.} + +\item{primary_metric_name}{A string of the name of the primary metric +reported by the experiment runs.} + +\item{primary_metric_goal}{The \code{PrimaryMetricGoal} object. This +parameter determines if the primary metric is to be minimized or +maximized when evaluating runs.} + +\item{max_total_runs}{An integer of the maximum total number of runs +to create. This is the upper bound; there may be fewer runs when the +sample space is smaller than this value. If both \code{max_total_runs} and +\code{max_duration_minutes} are specified, the hyperparameter tuning experiment +terminates when the first of these two thresholds is reached.} + +\item{max_concurrent_runs}{An integer of the maximum number of runs to +execute concurrently. If \code{NULL}, all runs are launched in parallel. +The number of concurrent runs is gated on the resources available in the +specified compute target. Hence, you need to ensure that the compute target +has the available resources for the desired concurrency.} + +\item{max_duration_minutes}{An integer of the maximum duration of the +HyperDrive run. Once this time is exceeded, any runs still executing are +cancelled. If both \code{max_total_runs} and \code{max_duration_minutes} are specified, +the hyperparameter tuning experiment terminates when the first of these two +thresholds is reached.} + +\item{policy}{The early termination policy to use. Can be either a +\code{BanditPolicy}, \code{MedianStoppingPolicy}, or \code{TruncationSelectionPolicy} +object. If \code{NULL} (the default), no early termination policy will be used. + +The \code{MedianStoppingPolicy} with \code{delay_evaluation = 5} is a good +termination policy to start with. 
These are conservative settings that can +provide approximately 25\%-35\% savings with no loss on primary metric +(based on our evaluation data).} + +\item{estimator}{The \code{Estimator} object.} +} +\value{ +The \code{HyperDriveConfig} object. +} +\description{ +The HyperDrive configuration includes information about hyperparameter +space sampling, termination policy, primary metric, estimator, and +the compute target to execute the experiment runs on. + +To submit the HyperDrive experiment, pass the \code{HyperDriveConfig} object +returned from this method to \code{submit_experiment()}. +} +\section{Examples}{ +\preformatted{# Load the workspace +ws <- load_workspace_from_config() + +# Get the compute target +compute_target <- get_compute(ws, cluster_name = 'mycluster') + +# Define the hyperparameter sampling space +param_sampling <- random_parameter_sampling( + list("batch_size" = choice(c(16, 32, 64, 128)))) + +# Define the primary metric goal +goal <- primary_metric_goal("MAXIMIZE") + +# Define the early termination policy +early_termination_policy <- median_stopping_policy(evaluation_interval = 1L, + delay_evaluation = 5L) + +# Create the estimator +est <- estimator(source_directory = '.', + entry_script = 'train.R', + compute_target = compute_target) + +# Create the HyperDrive configuration +hyperdrive_run_config <- hyperdrive_config( + hyperparameter_sampling = param_sampling, + primary_metric_name = 'accuracy', + primary_metric_goal = goal, + max_total_runs = 100L, + max_concurrent_runs = 4L, + policy = early_termination_policy, + estimator = est) + +# Submit the HyperDrive experiment +exp <- experiment(ws, name = 'myexperiment') +run <- submit_experiment(exp, hyperdrive_run_config) +} +} + +\seealso{ +\code{submit_experiment()} +} diff --git a/man/inference_config.Rd b/man/inference_config.Rd new file mode 100644 index 00000000..0d6b7293 --- /dev/null +++ b/man/inference_config.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{inference_config} +\alias{inference_config} +\title{Creates the Model deployment config specific to model deployments.} +\usage{ +inference_config(entry_script, source_directory = NULL, + description = NULL, environment = NULL) +} +\arguments{ +\item{entry_script}{Path to a local file that contains the code to run for +the image.} + +\item{source_directory}{Path to the folder that contains all files needed +to create the image.} + +\item{description}{A description to give this image.} + +\item{environment}{An environment object to use for the deployment. +Doesn't have to be registered. A user should provide either this, or the +other parameters, not both. The individual parameters will NOT serve +as an override for the environment object. Exceptions include +\code{entry_script}, \code{source_directory}, and \code{description}.} +} +\value{ +An InferenceConfig object. +} +\description{ +Creates the Model deployment config specific to model deployments. 
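+} +\section{Examples}{ +A minimal sketch, assuming a scoring script \code{'score.R'} in the current +directory and an environment created with \code{r_environment()}:\preformatted{r_env <- r_environment(name = 'myr_env', version = '1') +inf_config <- inference_config(entry_script = 'score.R', + environment = r_env) +} +} + 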
diff --git a/man/install_azureml.Rd b/man/install_azureml.Rd new file mode 100644 index 00000000..dfb176fc --- /dev/null +++ b/man/install_azureml.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/install.R +\name{install_azureml} +\alias{install_azureml} +\title{Install the azureml SDK package} +\usage{ +install_azureml(version = NULL, envname = "r-azureml", + conda_python_version = "3.6") +} +\arguments{ +\item{version}{The azureml SDK package version.} + +\item{envname}{The name of the conda environment to create.} + +\item{conda_python_version}{The version of Python for the conda environment.} +} +\description{ +Install the azureml SDK package. +} diff --git a/man/invoke_webservice.Rd b/man/invoke_webservice.Rd new file mode 100644 index 00000000..9a1e8462 --- /dev/null +++ b/man/invoke_webservice.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{invoke_webservice} +\alias{invoke_webservice} +\title{Call this Webservice with the provided input.} +\usage{ +invoke_webservice(webservice, input_data) +} +\arguments{ +\item{webservice}{The webservice object.} + +\item{input_data}{The input data to call the Webservice with. This is the +data your machine learning model expects as an input to run predictions.} +} +\value{ +The result of calling the Webservice. This will return predictions +run from your machine learning model. +} +\description{ +Call this Webservice with the provided input. +} diff --git a/man/list_nodes_in_aml_compute.Rd b/man/list_nodes_in_aml_compute.Rd new file mode 100644 index 00000000..fb9bcbe0 --- /dev/null +++ b/man/list_nodes_in_aml_compute.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{list_nodes_in_aml_compute} +\alias{list_nodes_in_aml_compute} +\title{Get the details (e.g. IP address, port, etc.) of all the compute nodes in +the compute target} +\usage{ +list_nodes_in_aml_compute(cluster) +} +\arguments{ +\item{cluster}{The cluster object.} +} +\value{ +A data frame of the details of all the compute nodes in the cluster. +} +\description{ +Get the details (e.g. IP address, port, etc.) of all the compute nodes in +the compute target. +} diff --git a/man/list_secrets.Rd b/man/list_secrets.Rd new file mode 100644 index 00000000..b2675e22 --- /dev/null +++ b/man/list_secrets.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/keyvault.R +\name{list_secrets} +\alias{list_secrets} +\title{List the secrets in a keyvault} +\usage{ +list_secrets(keyvault) +} +\arguments{ +\item{keyvault}{The \code{Keyvault} object.} +} +\value{ +A list of secret names. +} +\description{ +Returns the list of secret names for all the secrets in the keyvault +associated with the workspace. +} diff --git a/man/list_supported_vm_sizes.Rd b/man/list_supported_vm_sizes.Rd new file mode 100644 index 00000000..f1a8b3f9 --- /dev/null +++ b/man/list_supported_vm_sizes.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{list_supported_vm_sizes} +\alias{list_supported_vm_sizes} +\title{List the supported VM sizes in a region} +\usage{ +list_supported_vm_sizes(workspace, location = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{location}{A string of the location of the cluster. 
If not specified, +will default to the workspace location.} +} +\value{ +A data frame of the supported VM sizes in a region, including the VM name, +VCPUs, and RAM. +} +\description{ +List the supported VM sizes in a region. +} diff --git a/man/list_workspaces.Rd b/man/list_workspaces.Rd new file mode 100644 index 00000000..208189b3 --- /dev/null +++ b/man/list_workspaces.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{list_workspaces} +\alias{list_workspaces} +\title{List all workspaces that the user has access to in a subscription ID} +\usage{ +list_workspaces(subscription_id, resource_group = NULL) +} +\arguments{ +\item{subscription_id}{A string of the specified subscription ID to +list the workspaces in.} + +\item{resource_group}{A string of the resource group to list the +workspaces in. If \code{NULL}, the method will list all the workspaces within +the specified subscription.} +} +\value{ +A named list of \code{Workspace} objects where element name corresponds +to the workspace name. +} +\description{ +List all workspaces that the user has access to in the specified +\code{subscription_id} parameter. The list of workspaces can be filtered +based on the resource group. +} diff --git a/man/load_workspace_from_config.Rd b/man/load_workspace_from_config.Rd new file mode 100644 index 00000000..fe9d6123 --- /dev/null +++ b/man/load_workspace_from_config.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{load_workspace_from_config} +\alias{load_workspace_from_config} +\title{Load workspace configuration details from a config file} +\usage{ +load_workspace_from_config(path = NULL) +} +\arguments{ +\item{path}{A string of the path to the config file or starting directory +for search. The parameter defaults to starting the search in the current +directory.} +} +\value{ +The \code{Workspace} object. +} +\description{ +Returns a \code{Workspace} object for an existing Azure Machine Learning +workspace by reading the workspace configuration from a file. The method +provides a simple way of reusing the same workspace across multiple files or +projects. Users can save the workspace ARM properties using +\code{write_workspace_config()}, and use this method to load the same workspace +in different files or projects without retyping the workspace ARM properties. +} diff --git a/man/local_webservice_deployment_config.Rd b/man/local_webservice_deployment_config.Rd new file mode 100644 index 00000000..29792672 --- /dev/null +++ b/man/local_webservice_deployment_config.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-local.R +\name{local_webservice_deployment_config} +\alias{local_webservice_deployment_config} +\title{Create a configuration object for deploying a local Webservice.} +\usage{ +local_webservice_deployment_config(port = NULL) +} +\arguments{ +\item{port}{The local port on which to expose the service's HTTP endpoint.} +} +\value{ +LocalWebserviceDeploymentConfiguration object to use when deploying +a Webservice object. +} +\description{ +Create a configuration object for deploying a local Webservice. 
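+} +\section{Examples}{ +A minimal sketch (the port number is an arbitrary choice):\preformatted{deployment_config <- local_webservice_deployment_config(port = 8890L) +} +} + 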
diff --git a/man/log_accuracy_table_to_run.Rd b/man/log_accuracy_table_to_run.Rd new file mode 100644 index 00000000..256289b6 --- /dev/null +++ b/man/log_accuracy_table_to_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_accuracy_table_to_run} +\alias{log_accuracy_table_to_run} +\title{Log an accuracy table to the artifact store.} +\usage{ +log_accuracy_table_to_run(name, value, description = "", run = NULL) +} +\arguments{ +\item{name}{The name of the accuracy table.} + +\item{value}{JSON containing name, version, and data properties.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log an accuracy table to the artifact store. +} diff --git a/man/log_confusion_matrix_to_run.Rd b/man/log_confusion_matrix_to_run.Rd new file mode 100644 index 00000000..fe7ffefb --- /dev/null +++ b/man/log_confusion_matrix_to_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_confusion_matrix_to_run} +\alias{log_confusion_matrix_to_run} +\title{Log a confusion matrix to the artifact store.} +\usage{ +log_confusion_matrix_to_run(name, value, description = "", run = NULL) +} +\arguments{ +\item{name}{The name of the confusion matrix.} + +\item{value}{JSON containing name, version, and data properties.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log a confusion matrix to the artifact store. +} diff --git a/man/log_image_to_run.Rd b/man/log_image_to_run.Rd new file mode 100644 index 00000000..253e0e14 --- /dev/null +++ b/man/log_image_to_run.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_image_to_run} +\alias{log_image_to_run} +\title{Log an image metric to the run record.} +\usage{ +log_image_to_run(name, path = NULL, plot = NULL, description = "", + run = NULL) +} +\arguments{ +\item{name}{The name of the metric.} + +\item{path}{The path or stream of the image.} + +\item{plot}{The plot to log as an image.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log an image metric to the run record. +} diff --git a/man/log_list_to_run.Rd b/man/log_list_to_run.Rd new file mode 100644 index 00000000..3b0f7bb7 --- /dev/null +++ b/man/log_list_to_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_list_to_run} +\alias{log_list_to_run} +\title{Log a list metric value to the run with the given name.} +\usage{ +log_list_to_run(name, value, description = "", run = NULL) +} +\arguments{ +\item{name}{The name of the metric.} + +\item{value}{The value of the metric.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log a list metric value to the run with the given name. 
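+} +\section{Examples}{ +A minimal sketch, called from within a training script (the metric name and +values are illustrative):\preformatted{log_list_to_run("accuracies", c(0.6, 0.7, 0.87)) +} +} + 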
diff --git a/man/log_metric_to_run.Rd b/man/log_metric_to_run.Rd new file mode 100644 index 00000000..1a63d4b9 --- /dev/null +++ b/man/log_metric_to_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_metric_to_run} +\alias{log_metric_to_run} +\title{Log a metric to a run} +\usage{ +log_metric_to_run(name, value, run = NULL) +} +\arguments{ +\item{name}{The name of the metric.} + +\item{value}{The value of the metric.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log a metric to a run. +} diff --git a/man/log_predictions_to_run.Rd b/man/log_predictions_to_run.Rd new file mode 100644 index 00000000..1b9e0e5b --- /dev/null +++ b/man/log_predictions_to_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_predictions_to_run} +\alias{log_predictions_to_run} +\title{Log predictions to the artifact store.} +\usage{ +log_predictions_to_run(name, value, description = "", run = NULL) +} +\arguments{ +\item{name}{The name of the predictions.} + +\item{value}{JSON containing name, version, and data properties.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log predictions to the artifact store. +} diff --git a/man/log_residuals_to_run.Rd b/man/log_residuals_to_run.Rd new file mode 100644 index 00000000..4cb2f2da --- /dev/null +++ b/man/log_residuals_to_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_residuals_to_run} +\alias{log_residuals_to_run} +\title{Log residuals to the artifact store.} +\usage{ +log_residuals_to_run(name, value, description = "", run = NULL) +} +\arguments{ +\item{name}{The name of the residuals.} + +\item{value}{JSON containing name, version, and data properties.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log residuals to the artifact store. +} diff --git a/man/log_row_to_run.Rd b/man/log_row_to_run.Rd new file mode 100644 index 00000000..6d523b10 --- /dev/null +++ b/man/log_row_to_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_row_to_run} +\alias{log_row_to_run} +\title{Log a row metric to the run with the given name.} +\usage{ +log_row_to_run(name, description = "", run = NULL, ...) +} +\arguments{ +\item{name}{The name of the metric.} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} + +\item{...}{Each named parameter generates a column with the value specified.} +} +\description{ +Log a row metric to the run with the given name. 
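+} +\section{Examples}{ +For example, to log a row with one column per named parameter (the metric +name and the columns \code{x} and \code{y} are illustrative):\preformatted{log_row_to_run("Y over X", x = 1, y = 0.4) +} +} + 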
diff --git a/man/log_table_to_run.Rd b/man/log_table_to_run.Rd new file mode 100644 index 00000000..744caf6a --- /dev/null +++ b/man/log_table_to_run.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{log_table_to_run} +\alias{log_table_to_run} +\title{Log a table metric to the run with the given name.} +\usage{ +log_table_to_run(name, value, description = "", run = NULL) +} +\arguments{ +\item{name}{The name of the metric.} + +\item{value}{The table value of the metric (a named list where the element +names are the columns to be posted to the service).} + +\item{description}{An optional metric description.} + +\item{run}{Run object. If not specified, will default to current run from +service context.} +} +\description{ +Log a table metric to the run with the given name. +} diff --git a/man/lognormal.Rd b/man/lognormal.Rd new file mode 100644 index 00000000..0c4f1c8c --- /dev/null +++ b/man/lognormal.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{lognormal} +\alias{lognormal} +\title{Specify a normal distribution of the form \code{exp(normal(mu, sigma))}} +\usage{ +lognormal(mu, sigma) +} +\arguments{ +\item{mu}{A double of the mean of the normal distribution.} + +\item{sigma}{A double of the standard deviation of the normal distribution.} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a normal distribution of the form \code{exp(normal(mu, sigma))}. + +The logarithm of the return value is normally distributed. When optimizing, +this variable is constrained to be positive. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/loguniform.Rd b/man/loguniform.Rd new file mode 100644 index 00000000..ff51a88f --- /dev/null +++ b/man/loguniform.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{loguniform} +\alias{loguniform} +\title{Specify a log uniform distribution} +\usage{ +loguniform(min_value, max_value) +} +\arguments{ +\item{min_value}{A double where the minimum value in the range will be +\code{exp(min_value)} (inclusive).} + +\item{max_value}{A double where the maximum value in the range will be +\code{exp(max_value)} (inclusive).} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a log uniform distribution. + +A value is drawn according to \code{exp(uniform(min_value, max_value))} so that +the logarithm of the return value is uniformly distributed. When optimizing, +this variable is constrained to the interval +\code{[exp(min_value), exp(max_value)]}. 
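+} +\section{Examples}{ +A sketch of using \code{loguniform()} in a sampling space (the parameter name +\code{'learning_rate'} is illustrative):\preformatted{param_sampling <- random_parameter_sampling( + list("learning_rate" = loguniform(-6, -1))) +} +} + 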
+\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/median_stopping_policy.Rd b/man/median_stopping_policy.Rd new file mode 100644 index 00000000..42de5df7 --- /dev/null +++ b/man/median_stopping_policy.Rd @@ -0,0 +1,59 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{median_stopping_policy} +\alias{median_stopping_policy} +\title{Define a median stopping policy for early termination of HyperDrive runs} +\usage{ +median_stopping_policy(evaluation_interval = 1L, delay_evaluation = 0L) +} +\arguments{ +\item{evaluation_interval}{An integer of the frequency for applying policy.} + +\item{delay_evaluation}{An integer of the number of intervals for which to +delay the first evaluation.} +} +\value{ +The \code{MedianStoppingPolicy} object. +} +\description{ +Median stopping is an early termination policy based on running averages of +primary metrics reported by the runs. This policy computes running averages +across all training runs and terminates runs whose performance is worse than +the median of the running averages. Specifically, a run will be canceled at +interval N if its best primary metric reported up to interval N is worse than +the median of the running averages for intervals 1:N across all runs. +} +\section{Details}{ + +The median stopping policy takes the following optional configuration +parameters: +\itemize{ +\item \code{evaluation_interval}: Optional. The frequency for applying the policy. +Each time the training script logs the primary metric counts as one +interval. +\item \code{delay_evaluation}: Optional. The number of intervals to delay the +policy evaluation. Use this parameter to avoid premature termination +of training runs. If specified, the policy applies every multiple of +\code{evaluation_interval} that is greater than or equal to \code{delay_evaluation}. +} + +This policy is inspired by the research publication +\href{https://ai.google/research/pubs/pub46180}{Google Vizier: A Service for Black-Box Optimization}. + +If you are looking for a conservative policy that provides savings without +terminating promising jobs, you can use a \code{MedianStoppingPolicy} with +\code{evaluation_interval = 1} and \code{delay_evaluation = 5}. These are conservative +settings that can provide approximately 25\%-35\% savings with no loss on +the primary metric (based on our evaluation data). +} + +\section{Examples}{ + +In this example, the early termination policy is applied at every +interval starting at evaluation interval 5. A run will be terminated at +interval 5 if its best primary metric is worse than the median of the +running averages over intervals 1:5 across all training runs.\preformatted{early_termination_policy = median_stopping_policy(evaluation_interval = 1L, + delay_evaluation = 5L) +} +} + diff --git a/man/normal.Rd b/man/normal.Rd new file mode 100644 index 00000000..a2596b8e --- /dev/null +++ b/man/normal.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{normal} +\alias{normal} +\title{Specify a real value that is normally-distributed with mean \code{mu} and standard +deviation \code{sigma}} +\usage{ +normal(mu, sigma) +} +\arguments{ +\item{mu}{A double of the mean of the normal distribution.} + +\item{sigma}{A double of the standard deviation of the normal distribution.} +} +\value{ +A list of the stochastic expression. 
+} +\description{ +Specify a real value that is normally-distributed with mean \code{mu} and +standard deviation \code{sigma}. + +When optimizing, this is an unconstrained variable. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/package_model.Rd b/man/package_model.Rd new file mode 100644 index 00000000..d90dda73 --- /dev/null +++ b/man/package_model.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{package_model} +\alias{package_model} +\title{Create a model package in the form of a Docker image or Dockerfile build +context} +\usage{ +package_model(workspace, models, inference_config, + generate_dockerfile = FALSE) +} +\arguments{ +\item{workspace}{The workspace in which to create the package.} + +\item{models}{A list of Model objects to include in the package. Can be an +empty list.} + +\item{inference_config}{An InferenceConfig object to configure the +operation of the models. This must include an Environment object.} + +\item{generate_dockerfile}{Whether to create a Dockerfile that can be run +locally instead of building an image.} +} +\value{ +A ModelPackage object. +} +\description{ +Create a model package in the form of a Docker image or Dockerfile build +context +} diff --git a/man/primary_metric_goal.Rd b/man/primary_metric_goal.Rd new file mode 100644 index 00000000..7960fef0 --- /dev/null +++ b/man/primary_metric_goal.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{primary_metric_goal} +\alias{primary_metric_goal} +\title{Define supported metric goals for hyperparameter tuning} +\usage{ +primary_metric_goal(goal) +} +\arguments{ +\item{goal}{A string of the metric goal (either "MAXIMIZE" or "MINIMIZE").} +} +\value{ +The \code{PrimaryMetricGoal} object. +} +\description{ +A metric goal is used to determine whether a higher value for a metric +is better or worse. Metric goals are used when comparing runs based on +the primary metric. For example, you may want to maximize accuracy or +minimize error. + +The primary metric name and goal are specified to \code{hyperdrive_config()} +when you configure a HyperDrive run. +} diff --git a/man/pull_model_package_image.Rd b/man/pull_model_package_image.Rd new file mode 100644 index 00000000..6a371b3a --- /dev/null +++ b/man/pull_model_package_image.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{pull_model_package_image} +\alias{pull_model_package_image} +\title{Pull the package output to the local machine. +This can only be used with a Docker image package.} +\usage{ +pull_model_package_image(package) +} +\arguments{ +\item{package}{Package created with model(s) and dependencies.} +} +\description{ +Pull the package output to the local machine. +This can only be used with a Docker image package. 
diff --git a/man/qlognormal.Rd b/man/qlognormal.Rd new file mode 100644 index 00000000..4155ad82 --- /dev/null +++ b/man/qlognormal.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{qlognormal} +\alias{qlognormal} +\title{Specify a normal distribution of the form +\code{round(exp(normal(mu, sigma)) / q) * q}} +\usage{ +qlognormal(mu, sigma, q) +} +\arguments{ +\item{mu}{A double of the mean of the normal distribution.} + +\item{sigma}{A double of the standard deviation of the normal distribution.} + +\item{q}{An integer of the smoothing factor.} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a normal distribution of the form +\code{round(exp(normal(mu, sigma)) / q) * q}. + +Suitable for a discrete variable with respect to which the objective is +smooth and gets smoother with the size of the variable, which is bounded +from one side. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/qloguniform.Rd b/man/qloguniform.Rd new file mode 100644 index 00000000..c6864532 --- /dev/null +++ b/man/qloguniform.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{qloguniform} +\alias{qloguniform} +\title{Specify a uniform distribution of the form +\code{round(exp(uniform(min_value, max_value)) / q) * q}} +\usage{ +qloguniform(min_value, max_value, q) +} +\arguments{ +\item{min_value}{A double of the minimum value in the range (inclusive).} + +\item{max_value}{A double of the maximum value in the range (inclusive).} + +\item{q}{An integer of the smoothing factor.} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a uniform distribution of the form +\code{round(exp(uniform(min_value, max_value)) / q) * q}. + +This is suitable for a discrete variable with respect to which the objective +is "smooth", and gets smoother with the size of the value, but which should +be bounded both above and below. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/qnormal.Rd b/man/qnormal.Rd new file mode 100644 index 00000000..0e6c31e5 --- /dev/null +++ b/man/qnormal.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{qnormal} +\alias{qnormal} +\title{Specify a normal distribution of the form \code{round(normal(mu, sigma) / q) * q}} +\usage{ +qnormal(mu, sigma, q) +} +\arguments{ +\item{mu}{A double of the mean of the normal distribution.} + +\item{sigma}{A double of the standard deviation of the normal distribution.} + +\item{q}{An integer of the smoothing factor.} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a normal distribution of the form \code{round(normal(mu, sigma) / q) * q}. + +Suitable for a discrete variable that probably takes a value around \code{mu}, +but is fundamentally unbounded. 
+} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/quniform.Rd b/man/quniform.Rd new file mode 100644 index 00000000..f15fa87f --- /dev/null +++ b/man/quniform.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{quniform} +\alias{quniform} +\title{Specify a uniform distribution of the form +\code{round(uniform(min_value, max_value) / q) * q}} +\usage{ +quniform(min_value, max_value, q) +} +\arguments{ +\item{min_value}{A double of the minimum value in the range (inclusive).} + +\item{max_value}{A double of the maximum value in the range (inclusive).} + +\item{q}{An integer of the smoothing factor.} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a uniform distribution of the form +\code{round(uniform(min_value, max_value) / q) * q}. + +This is suitable for a discrete value with respect to which the objective +is still somewhat "smooth", but which should be bounded both above and below. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/r_environment.Rd b/man/r_environment.Rd new file mode 100644 index 00000000..e483f3cf --- /dev/null +++ b/man/r_environment.Rd @@ -0,0 +1,88 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environment.R +\name{r_environment} +\alias{r_environment} +\title{Create an environment} +\usage{ +r_environment(name, version = NULL, environment_variables = NULL, + cran_packages = NULL, github_packages = NULL, + custom_url_packages = NULL, custom_docker_image = NULL, + image_registry_details = NULL, use_gpu = FALSE, shm_size = NULL) +} +\arguments{ +\item{name}{A string of the name of the environment.} + +\item{version}{A string of the version of the environment.} + +\item{environment_variables}{A named list of environment variable names +and values. These environment variables are set on the process where the user +script is being executed.} + +\item{cran_packages}{A character vector of CRAN packages to be installed.} + +\item{github_packages}{A character vector of GitHub packages to be installed.} + +\item{custom_url_packages}{A character vector of packages to be installed +from local directory or custom URL.} + +\item{custom_docker_image}{A string of the name of the Docker image from +which the image to use for training or deployment will be built. If not set, +a default CPU-based image will be used as the base image. To use an image +from a private Docker repository, you will also have to specify the +\code{image_registry_details} parameter.} + +\item{image_registry_details}{A \code{ContainerRegistry} object of the details of +the Docker image registry for the custom Docker image.} + +\item{use_gpu}{Indicates whether the environment should support GPUs. +If \code{TRUE}, a GPU-based default Docker image will be used in the environment. +If \code{FALSE}, a CPU-based image will be used. Default Docker images (CPU or +GPU) will only be used if the \code{custom_docker_image} parameter is not set.} + +\item{shm_size}{A string for the size of the Docker container's shared +memory block. For more information, see the +\href{https://docs.docker.com/engine/reference/run/}{Docker run reference}. +If not set, a default value of \code{'2g'} is used.} +} +\value{ +The \code{Environment} object. 
+} +\description{ +Configure the R environment to be used for training or web service +deployments. When you submit a run or deploy a model, Azure ML builds a +Docker image and creates a conda environment with your specifications from +your \code{Environment} object within that Docker container. + +If the \code{custom_docker_image} parameter +is not set, Azure ML automatically uses a default base image (CPU or GPU +depending on the \code{use_gpu} flag) and installs any R packages specified in the +\code{cran_packages}, \code{github_packages}, or \code{custom_url_packages} parameters. +} +\section{Details}{ + +Once built, the Docker image appears in the Azure Container Registry +associated with your workspace, by default. The repository name has the form +\emph{azureml/azureml_<uuid>}. The unique identifier (\emph{uuid}) part corresponds to +a hash computed from the environment configuration. This allows the service +to determine whether an image corresponding to the given environment already +exists for reuse. + +If you make changes to an existing environment, such as adding an R package, +a new version of the environment is created when you either submit a run, +deploy a model, or manually register the environment. The versioning allows +you to view changes to the environment over time. +} + +\section{Examples}{ + +The following example defines an environment that will use the default +base CPU image and install the additional e1071 package from CRAN.\preformatted{r_env <- r_environment(name = 'myr_env', + version = '1', + cran_packages = c('e1071')) +} +} + +\seealso{ +\code{estimator()}, \code{inference_config()} +} diff --git a/man/randint.Rd b/man/randint.Rd new file mode 100644 index 00000000..67089847 --- /dev/null +++ b/man/randint.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{randint} +\alias{randint} +\title{Specify a set of random integers in the range \code{[0, upper)}} +\usage{ +randint(upper) +} +\arguments{ +\item{upper}{An integer of the upper bound for the range of +integers (exclusive).} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a set of random integers in the range \code{[0, upper)} +to sample the hyperparameters from. + +The semantics of this distribution are that the loss function is no more +correlated between nearby integer values than between more distant integer +values. This is an appropriate distribution for describing random seeds, +for example. If the loss function is likely more correlated for nearby +integer values, then you should probably use one of the "quantized" +continuous distributions, such as either \code{quniform()}, \code{qloguniform()}, +\code{qnormal()}, or \code{qlognormal()}. 
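+} +\section{Examples}{ +A sketch of using \code{randint()} in a sampling space (the parameter name +\code{'seed'} is illustrative):\preformatted{param_sampling <- random_parameter_sampling(list("seed" = randint(10000L))) +} +} + 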
+\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/random_parameter_sampling.Rd b/man/random_parameter_sampling.Rd new file mode 100644 index 00000000..3242cd14 --- /dev/null +++ b/man/random_parameter_sampling.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{random_parameter_sampling} +\alias{random_parameter_sampling} +\title{Define random sampling over a hyperparameter search space} +\usage{ +random_parameter_sampling(parameter_space, properties = NULL) +} +\arguments{ +\item{parameter_space}{A named list containing each parameter and its +distribution, e.g. \code{list("parameter" = distribution)}.} + +\item{properties}{A named list of additional properties for the algorithm.} +} +\value{ +The \code{RandomParameterSampling} object. +} +\description{ +In random sampling, hyperparameter values are randomly selected from the +defined search space. Random sampling allows the search space to include +both discrete and continuous hyperparameters. +} +\section{Details}{ + +In this sampling algorithm, parameter values are chosen from a set of +discrete values or a distribution over a continuous range. Functions you can +use include: +\code{choice()}, \code{randint()}, \code{uniform()}, \code{quniform()}, \code{loguniform()}, +\code{qloguniform()}, \code{normal()}, \code{qnormal()}, \code{lognormal()}, and \code{qlognormal()}. +} + +\section{Examples}{ +\preformatted{param_sampling <- random_parameter_sampling( + list("learning_rate" = normal(10, 3), + "keep_probability" = uniform(0.05, 0.1), + "batch_size" = choice(c(16, 32, 64, 128)))) +} +} + +\seealso{ +\code{choice()}, \code{randint()}, \code{uniform()}, \code{quniform()}, \code{loguniform()}, +\code{qloguniform()}, \code{normal()}, \code{qnormal()}, \code{lognormal()}, \code{qlognormal()} +} diff --git a/man/register_azure_blob_container_datastore.Rd b/man/register_azure_blob_container_datastore.Rd new file mode 100644 index 00000000..f1b247ab --- /dev/null +++ b/man/register_azure_blob_container_datastore.Rd @@ -0,0 +1,84 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{register_azure_blob_container_datastore} +\alias{register_azure_blob_container_datastore} +\title{Register an Azure blob container as a datastore} +\usage{ +register_azure_blob_container_datastore(workspace, datastore_name, + container_name, account_name, sas_token = NULL, account_key = NULL, + protocol = NULL, endpoint = NULL, overwrite = FALSE, + create_if_not_exists = FALSE, skip_validation = FALSE, + blob_cache_timeout = NULL, grant_workspace_access = FALSE, + subscription_id = NULL, resource_group = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{datastore_name}{A string of the name of the datastore. The name +must be case insensitive and can only contain alphanumeric characters and +underscores.} + +\item{container_name}{A string of the name of the Azure blob container.} + +\item{account_name}{A string of the storage account name.} + +\item{sas_token}{A string of the account SAS token.} + +\item{account_key}{A string of the storage account key.} + +\item{protocol}{A string of the protocol to use to connect to the +blob container. If \code{NULL}, defaults to \code{'https'}.} + +\item{endpoint}{A string of the endpoint of the blob container. 
+If \code{NULL}, defaults to \code{'core.windows.net'}.} + +\item{overwrite}{If \code{TRUE}, overwrites an existing datastore. If +the datastore does not exist, it will create one.} + +\item{create_if_not_exists}{If \code{TRUE}, creates the blob container +if it does not exist.} + +\item{skip_validation}{If \code{TRUE}, skips validation of storage keys.} + +\item{blob_cache_timeout}{An integer of the cache timeout in seconds +when this blob is mounted. If \code{NULL}, defaults to no timeout (i.e. +blobs will be cached for the duration of the job when read).} + +\item{grant_workspace_access}{If \code{TRUE}, grants workspace Managed Identities +(MSI) access to the user storage account. This should be set to \code{TRUE} if the +storage account is in VNET. If \code{TRUE}, Azure ML will use the workspace MSI +token to grant access to the user storage account. It may take a while for +the granted access to be reflected.} + +\item{subscription_id}{A string of the subscription ID of the storage +account.} + +\item{resource_group}{A string of the resource group of the storage account.} +} +\value{ +The \code{AzureBlobDatastore} object. +} +\description{ +Register an Azure blob container as a datastore. You can choose to use +either the SAS token or the storage account key. +} +\section{Details}{ + +In general we recommend Azure Blob storage over Azure File storage. Both +standard and premium storage are available for blobs. Although more +expensive, we suggest premium storage due to faster throughput speeds that +may improve the speed of your training runs, particularly if you train +against a large dataset. +} + +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +ds <- register_azure_blob_container_datastore( + ws, + datastore_name = 'mydatastore', + container_name = 'myazureblobcontainername', + account_name = 'mystorageaccountname', + account_key = 'mystorageaccountkey') +} +} + diff --git a/man/register_azure_file_share_datastore.Rd b/man/register_azure_file_share_datastore.Rd new file mode 100644 index 00000000..ca833985 --- /dev/null +++ b/man/register_azure_file_share_datastore.Rd @@ -0,0 +1,67 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{register_azure_file_share_datastore} +\alias{register_azure_file_share_datastore} +\title{Register an Azure file share as a datastore} +\usage{ +register_azure_file_share_datastore(workspace, datastore_name, + file_share_name, account_name, sas_token = NULL, account_key = NULL, + protocol = NULL, endpoint = NULL, overwrite = FALSE, + create_if_not_exists = FALSE, skip_validation = FALSE) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{datastore_name}{A string of the name of the datastore. The name +must be case insensitive and can only contain alphanumeric characters and +underscores.} + +\item{file_share_name}{A string of the name of the Azure file share.} + +\item{account_name}{A string of the storage account name.} + +\item{sas_token}{A string of the account SAS token.} + +\item{account_key}{A string of the storage account key.} + +\item{protocol}{A string of the protocol to use to connect to the +file store. If \code{NULL}, defaults to \code{'https'}.} + +\item{endpoint}{A string of the endpoint of the file store. +If \code{NULL}, defaults to \code{'core.windows.net'}.} + +\item{overwrite}{If \code{TRUE}, overwrites an existing datastore. 
If +the datastore does not exist, it will create one.} + +\item{create_if_not_exists}{If \code{TRUE}, creates the file share +if it does not exist.} + +\item{skip_validation}{If \code{TRUE}, skips validation of storage keys.} +} +\value{ +The \code{AzureFileDatastore} object. +} +\description{ +Register an Azure file share as a datastore. You can choose to use +either the SAS token or the storage account key. +} +\section{Details}{ + +In general we recommend Azure Blob storage over Azure File storage. Both +standard and premium storage are available for blobs. Although more +expensive, we suggest premium storage due to faster throughput speeds that +may improve the speed of your training runs, particularly if you train +against a large dataset. +} + +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +ds <- register_azure_file_share_datastore( + ws, + datastore_name = 'mydatastore', + file_share_name = 'myazurefilesharename', + account_name = 'mystorageaccountname', + account_key = 'mystorageaccountkey') +} +} + diff --git a/man/register_environment.Rd b/man/register_environment.Rd new file mode 100644 index 00000000..8e2e682d --- /dev/null +++ b/man/register_environment.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environment.R +\name{register_environment} +\alias{register_environment} +\title{Register an environment in the workspace} +\usage{ +register_environment(environment, workspace) +} +\arguments{ +\item{environment}{The \code{Environment} object.} + +\item{workspace}{The \code{Workspace} object.} +} +\value{ +The \code{Environment} object. +} +\description{ +The environment is automatically registered with your workspace when you +submit an experiment or deploy a web service. You can also manually register +the environment with \code{register_environment()}. This operation makes the +environment into an entity that is tracked and versioned in the cloud, and +can be shared between workspace users. + +When used for the first time in training or deployment, the environment is +registered with the workspace, built, and deployed on the compute target. +The environments are cached by the service. Reusing a cached environment +takes much less time than using a new environment or one that has been +updated. +} diff --git a/man/register_model.Rd b/man/register_model.Rd new file mode 100644 index 00000000..4eb82515 --- /dev/null +++ b/man/register_model.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{register_model} +\alias{register_model} +\title{Register a model with the provided workspace.} +\usage{ +register_model(workspace, model_path, model_name, tags = NULL, + properties = NULL, description = NULL, child_paths = NULL) +} +\arguments{ +\item{workspace}{The workspace to register the model under.} + +\item{model_path}{String which points to the path on the local file system +where the model assets are located. This can be a direct pointer to a single +file or folder. If pointing to a folder, the \code{child_paths} parameter can be +used to specify individual files to bundle together as the Model object, +as opposed to using the entire contents of the folder.} + +\item{model_name}{The name to register the model with.} + +\item{tags}{Dictionary of key value tags to give the model.} + +\item{properties}{Dictionary of key value properties to give the model.
+These properties cannot be changed after model creation; however, new key +value pairs can be added.} + +\item{description}{A text description of the model.} + +\item{child_paths}{If provided in conjunction with a \code{model_path} to a folder, +only the specified files will be bundled into the Model object.} +} +\value{ +The registered Model object. +} +\description{ +Register a model with the provided workspace. +} diff --git a/man/reload_local_webservice_assets.Rd b/man/reload_local_webservice_assets.Rd new file mode 100644 index 00000000..b1c48088 --- /dev/null +++ b/man/reload_local_webservice_assets.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-local.R +\name{reload_local_webservice_assets} +\alias{reload_local_webservice_assets} +\title{Reload the LocalWebservice's execution script and dependencies. +This restarts the service's container with copies of updated assets, +including the execution script and local dependencies, but it does not +rebuild the underlying image. Accordingly, changes to Conda/pip dependencies +or custom Docker steps will not be reflected in the reloaded LocalWebservice. +To handle those changes, call \code{update_local_webservice()} instead.} +\usage{ +reload_local_webservice_assets(webservice, wait = FALSE) +} +\arguments{ +\item{webservice}{LocalWebservice object.} + +\item{wait}{Wait for the service's container to reach a healthy state.} +} +\description{ +Reload the LocalWebservice's execution script and dependencies. +This restarts the service's container with copies of updated assets, +including the execution script and local dependencies, but it does not +rebuild the underlying image. Accordingly, changes to Conda/pip dependencies +or custom Docker steps will not be reflected in the reloaded LocalWebservice. +To handle those changes, call \code{update_local_webservice()} instead. +} diff --git a/man/save_model_package_files.Rd b/man/save_model_package_files.Rd new file mode 100644 index 00000000..7c741f13 --- /dev/null +++ b/man/save_model_package_files.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{save_model_package_files} +\alias{save_model_package_files} +\title{Save the package output to a local directory. +This can only be used with a Dockerfile package.} +\usage{ +save_model_package_files(package, output_directory) +} +\arguments{ +\item{package}{Package created with model(s) and dependencies.} + +\item{output_directory}{Local directory that will be created to contain +the contents of the package.} +} +\description{ +Save the package output to a local directory. +This can only be used with a Dockerfile package.
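+} + +\section{Examples}{ + +The following is a minimal, hypothetical sketch; it assumes \code{package} is +a model package created with \code{package_model()} and +\code{generate_dockerfile = TRUE}.\preformatted{wait_for_model_package_creation(package, show_output = TRUE) +save_model_package_files(package, output_directory = "downloaded_package") +} +}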
diff --git a/man/serialize_model.Rd b/man/serialize_model.Rd new file mode 100644 index 00000000..5236c7f5 --- /dev/null +++ b/man/serialize_model.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{serialize_model} +\alias{serialize_model} +\title{Convert this Model into a JSON serialized dictionary} +\usage{ +serialize_model(model) +} +\arguments{ +\item{model}{The model to serialize.} +} +\value{ +The JSON representation of this Model. +} +\description{ +Convert this Model into a JSON serialized dictionary. +} diff --git a/man/serialize_webservice.Rd b/man/serialize_webservice.Rd new file mode 100644 index 00000000..865e31e3 --- /dev/null +++ b/man/serialize_webservice.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{serialize_webservice} +\alias{serialize_webservice} +\title{Convert this Webservice into a JSON serialized dictionary.} +\usage{ +serialize_webservice(webservice) +} +\arguments{ +\item{webservice}{The webservice object.} +} +\value{ +The JSON representation of this Webservice. +} +\description{ +Convert this Webservice into a JSON serialized dictionary. +} diff --git a/man/set_default_datastore.Rd b/man/set_default_datastore.Rd new file mode 100644 index 00000000..b16c9f76 --- /dev/null +++ b/man/set_default_datastore.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{set_default_datastore} +\alias{set_default_datastore} +\title{Set the default datastore for a workspace} +\usage{ +set_default_datastore(workspace, datastore_name) +} +\arguments{ +\item{workspace}{The \code{Workspace} object.} + +\item{datastore_name}{The name of the datastore to be set as default.} +} +\description{ +Set the default datastore associated with the workspace. +} diff --git a/man/set_secrets.Rd b/man/set_secrets.Rd new file mode 100644 index 00000000..605be8b1 --- /dev/null +++ b/man/set_secrets.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/keyvault.R +\name{set_secrets} +\alias{set_secrets} +\title{Add secrets to a keyvault} +\usage{ +set_secrets(keyvault, secrets) +} +\arguments{ +\item{keyvault}{The \code{Keyvault} object.} + +\item{secrets}{The named list of secrets to be added to the keyvault, +where element name corresponds to the secret name.} +} +\description{ +Add a named list of secrets into the keyvault associated with the +workspace. +} +\section{Examples}{ +\preformatted{ws <- load_workspace_from_config() +my_secret <- Sys.getenv("MY_SECRET") +keyvault <- get_default_keyvault(ws) +set_secrets(keyvault, list("mysecret" = my_secret)) +} +} + diff --git a/man/submit_experiment.Rd b/man/submit_experiment.Rd new file mode 100644 index 00000000..9b6302e3 --- /dev/null +++ b/man/submit_experiment.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiment.R +\name{submit_experiment} +\alias{submit_experiment} +\title{Submit an experiment and return the active created run} +\usage{ +submit_experiment(experiment, config, tags = NULL) +} +\arguments{ +\item{experiment}{The \code{Experiment} object.} + +\item{config}{The \code{Estimator} or \code{HyperDriveConfig} object.} + +\item{tags}{A named list of tags for the submitted run, e.g. +\code{list("tag" = "value")}.} +} +\value{ +The \code{ScriptRun} or \code{HyperDriveRun} object.
+} +\description{ +\code{submit_experiment()} is an asynchronous call to the Azure Machine Learning +service to execute a trial on local or remote compute. Depending on the +configuration, \code{submit_experiment()} will automatically prepare your +execution environments, execute your code, and capture your source code +and results in the experiment's run history. + +To submit an experiment, you first need to create a configuration object +describing how the experiment is to be run. The configuration depends on +the type of trial required. For a script run, provide an \code{Estimator} object +to the \code{config} parameter. For a HyperDrive run for hyperparameter tuning, +provide a \code{HyperDriveConfig} to \code{config}. +} +\section{Examples}{ + +The following example submits an Estimator experiment.\preformatted{ws <- load_workspace_from_config() +compute_target <- get_compute(ws, cluster_name = 'mycluster') +exp <- experiment(ws, name = 'myexperiment') +est <- estimator(source_directory = '.', + entry_script = 'train.R', + compute_target = compute_target) +run <- submit_experiment(exp, est) +} + +For an example of submitting a HyperDrive experiment, see the +"Examples" section of \code{hyperdrive_config()}. +} + +\seealso{ +\code{estimator()}, \code{hyperdrive_config()} +} diff --git a/man/truncation_selection_policy.Rd b/man/truncation_selection_policy.Rd new file mode 100644 index 00000000..0362eb31 --- /dev/null +++ b/man/truncation_selection_policy.Rd @@ -0,0 +1,66 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{truncation_selection_policy} +\alias{truncation_selection_policy} +\title{Define a truncation selection policy for early termination of HyperDrive runs} +\usage{ +truncation_selection_policy(truncation_percentage, + evaluation_interval = 1L, delay_evaluation = 0L) +} +\arguments{ +\item{truncation_percentage}{An integer of the percentage of lowest +performing runs to terminate at each interval.} + +\item{evaluation_interval}{An integer of the frequency for applying the policy.} + +\item{delay_evaluation}{An integer of the number of intervals for which to +delay the first evaluation.} +} +\value{ +The \code{TruncationSelectionPolicy} object. +} +\description{ +Truncation selection cancels a given percentage of lowest performing runs at +each evaluation interval. Runs are compared based on their performance on the +primary metric and the lowest X% are terminated. +} +\section{Details}{ + +This policy periodically cancels the given percentage of runs that rank the +lowest for their performance on the primary metric. The policy strives for +fairness in ranking the runs by accounting for improving model performance +with training time. When ranking a relatively young run, the policy uses the +corresponding (and earlier) performance of older runs for comparison. +Therefore, runs aren't terminated for having a lower performance because they +have run for less time than other runs. + +The truncation selection policy takes the following configuration parameters: +\itemize{ +\item \code{truncation_percentage}: An integer of the percentage of lowest performing +runs to terminate at each evaluation interval. +\item \code{evaluation_interval}: Optional. The frequency for applying the policy. +Each time the training script logs the primary metric counts as one +interval. +\item \code{delay_evaluation}: Optional. The number of intervals to delay the +policy evaluation. Use this parameter to avoid premature termination +of training runs.
If specified, the policy applies every multiple of +\code{evaluation_interval} that is greater than or equal to \code{delay_evaluation}. +} + +For example, when evaluating a run at interval N, its performance is only +compared with the performance of other runs up to interval N even if they +reported metrics for intervals greater than N. +} + +\section{Examples}{ + +In this example, the early termination policy is applied at every interval +starting at evaluation interval 5. A run will be terminated at interval 5 +if its performance at interval 5 is in the lowest 20% of performance of all +runs at interval 5.\preformatted{early_termination_policy <- truncation_selection_policy( + truncation_percentage = 20L, + evaluation_interval = 1L, + delay_evaluation = 5L) +} +} + diff --git a/man/uniform.Rd b/man/uniform.Rd new file mode 100644 index 00000000..e0bad05c --- /dev/null +++ b/man/uniform.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hyperdrive.R +\name{uniform} +\alias{uniform} +\title{Specify a uniform distribution of options to sample from} +\usage{ +uniform(min_value, max_value) +} +\arguments{ +\item{min_value}{A double of the minimum value in the range +(inclusive).} + +\item{max_value}{A double of the maximum value in the range +(inclusive).} +} +\value{ +A list of the stochastic expression. +} +\description{ +Specify a uniform distribution of options to sample the +hyperparameters from. +} +\seealso{ +\code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, +\code{bayesian_parameter_sampling()} +} diff --git a/man/unregister_datastore.Rd b/man/unregister_datastore.Rd new file mode 100644 index 00000000..3dc61898 --- /dev/null +++ b/man/unregister_datastore.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{unregister_datastore} +\alias{unregister_datastore} +\title{Unregister a datastore from its associated workspace} +\usage{ +unregister_datastore(datastore) +} +\arguments{ +\item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} +} +\description{ +Unregister the datastore from its associated workspace. The +underlying Azure storage will not be deleted. +} diff --git a/man/update_aci_webservice.Rd b/man/update_aci_webservice.Rd new file mode 100644 index 00000000..542850ef --- /dev/null +++ b/man/update_aci_webservice.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-aci.R +\name{update_aci_webservice} +\alias{update_aci_webservice} +\title{Update the Webservice with provided properties. +Values left as \code{NULL} will remain unchanged in this Webservice.} +\usage{ +update_aci_webservice(webservice, tags = NULL, properties = NULL, + description = NULL, auth_enabled = NULL, ssl_enabled = NULL, + ssl_cert_pem_file = NULL, ssl_key_pem_file = NULL, + ssl_cname = NULL, enable_app_insights = NULL, models = NULL, + inference_config = NULL) +} +\arguments{ +\item{webservice}{AciWebservice object.} + +\item{tags}{Dictionary of key value tags to give this Webservice.
+Will replace existing tags.} + +\item{properties}{Dictionary of key value properties to add to existing +properties dictionary.} + +\item{description}{A description to give this Webservice.} + +\item{auth_enabled}{Enable or disable auth for this Webservice.} + +\item{ssl_enabled}{Whether or not to enable SSL for this Webservice.} + +\item{ssl_cert_pem_file}{The cert file needed if SSL is enabled.} + +\item{ssl_key_pem_file}{The key file needed if SSL is enabled.} + +\item{ssl_cname}{The CNAME to use if SSL is enabled.} + +\item{enable_app_insights}{Whether or not to enable AppInsights for this +Webservice.} + +\item{models}{A list of Model objects to package into the updated service.} + +\item{inference_config}{An InferenceConfig object used to provide the +required model deployment properties.} +} +\description{ +Update the Webservice with provided properties. +Values left as \code{NULL} will remain unchanged in this Webservice. +} diff --git a/man/update_aks_webservice.Rd b/man/update_aks_webservice.Rd new file mode 100644 index 00000000..f4340c37 --- /dev/null +++ b/man/update_aks_webservice.Rd @@ -0,0 +1,104 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-aks.R +\name{update_aks_webservice} +\alias{update_aks_webservice} +\title{Update the Webservice with provided properties. +Values left as \code{NULL} will remain unchanged in this Webservice.} +\usage{ +update_aks_webservice(webservice, autoscale_enabled = NULL, + autoscale_min_replicas = NULL, autoscale_max_replicas = NULL, + autoscale_refresh_seconds = NULL, + autoscale_target_utilization = NULL, auth_enabled = NULL, + cpu_cores = NULL, memory_gb = NULL, enable_app_insights = NULL, + scoring_timeout_ms = NULL, replica_max_concurrent_requests = NULL, + max_request_wait_time = NULL, num_replicas = NULL, tags = NULL, + properties = NULL, description = NULL, models = NULL, + inference_config = NULL, gpu_cores = NULL, period_seconds = NULL, + initial_delay_seconds = NULL, timeout_seconds = NULL, + success_threshold = NULL, failure_threshold = NULL, + namespace = NULL, token_auth_enabled = NULL) +} +\arguments{ +\item{webservice}{AksWebservice object.} + +\item{autoscale_enabled}{Enable or disable autoscaling of this Webservice} + +\item{autoscale_min_replicas}{The minimum number of containers to use when +autoscaling this Webservice} + +\item{autoscale_max_replicas}{The maximum number of containers to use when +autoscaling this Webservice} + +\item{autoscale_refresh_seconds}{How often the autoscaler should attempt to +scale this Webservice} + +\item{autoscale_target_utilization}{The target utilization (in percent out +of 100) the autoscaler should attempt to maintain for this Webservice} + +\item{auth_enabled}{Whether or not to enable auth for this Webservice} + +\item{cpu_cores}{The number of CPU cores to allocate for this Webservice. +Can be a decimal.} + +\item{memory_gb}{The amount of memory (in GB) to allocate for this +Webservice.
Can be a decimal.} + +\item{enable_app_insights}{Whether or not to enable Application Insights +logging for this Webservice} + +\item{scoring_timeout_ms}{A timeout to enforce for scoring calls to this +Webservice} + +\item{replica_max_concurrent_requests}{The number of maximum concurrent +requests per node to allow for this Webservice} + +\item{max_request_wait_time}{The maximum amount of time a request will stay +in the queue (in milliseconds) before returning a 503 error} + +\item{num_replicas}{The number of containers to allocate for this Webservice} + +\item{tags}{Dictionary of key value tags to give this Webservice. Will +replace existing tags.} + +\item{properties}{Dictionary of key value properties to add to existing +properties dictionary} + +\item{description}{A description to give this Webservice} + +\item{models}{A list of Model objects to package with the updated service} + +\item{inference_config}{An InferenceConfig object used to provide the +required model deployment properties.} + +\item{gpu_cores}{The number of GPU cores to allocate for this Webservice} + +\item{period_seconds}{How often (in seconds) to perform the liveness probe. +Defaults to 10 seconds. Minimum value is 1.} + +\item{initial_delay_seconds}{Number of seconds after the container has +started before liveness probes are initiated.} + +\item{timeout_seconds}{Number of seconds after which the liveness probe +times out. Defaults to 1 second. Minimum value is 1.} + +\item{success_threshold}{Minimum consecutive successes for the liveness +probe to be considered successful after having failed. Defaults to 1. +Minimum value is 1.} + +\item{failure_threshold}{When a Pod starts and the liveness probe fails, +Kubernetes will try failureThreshold times before giving up. Defaults to 3. +Minimum value is 1.} + +\item{namespace}{The Kubernetes namespace in which to deploy this +Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen +('-') characters. The first and last characters cannot be hyphens.} + +\item{token_auth_enabled}{Whether or not to enable Token auth for this +Webservice. If this is enabled, users can access this Webservice by fetching +an access token using their Azure Active Directory credentials. +Defaults to FALSE.} +} +\description{ +Update the Webservice with provided properties. +Values left as \code{NULL} will remain unchanged in this Webservice. +} diff --git a/man/update_aml_compute.Rd b/man/update_aml_compute.Rd new file mode 100644 index 00000000..1ad416c9 --- /dev/null +++ b/man/update_aml_compute.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{update_aml_compute} +\alias{update_aml_compute} +\title{Update scale settings for an AmlCompute cluster} +\usage{ +update_aml_compute(cluster, min_nodes = NULL, max_nodes = NULL, + idle_seconds_before_scaledown = NULL) +} +\arguments{ +\item{cluster}{The \code{AmlCompute} cluster.} + +\item{min_nodes}{An integer of the minimum number of nodes to use on +the cluster.} + +\item{max_nodes}{An integer of the maximum number of nodes to use on +the cluster.} + +\item{idle_seconds_before_scaledown}{An integer of the node idle time +in seconds before scaling down the cluster.} +} +\description{ +Update the scale settings for an existing AmlCompute cluster.
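+} + +\section{Examples}{ + +The following is a minimal sketch, assuming an existing cluster named +\code{'mycluster'}; the scale values shown are illustrative.\preformatted{ws <- load_workspace_from_config() +cluster <- get_compute(ws, cluster_name = 'mycluster') +update_aml_compute(cluster, min_nodes = 0L, max_nodes = 4L, + idle_seconds_before_scaledown = 600L) +} +}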
diff --git a/man/update_local_webservice.Rd b/man/update_local_webservice.Rd new file mode 100644 index 00000000..ac73a2ef --- /dev/null +++ b/man/update_local_webservice.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice-local.R +\name{update_local_webservice} +\alias{update_local_webservice} +\title{Update the LocalWebservice with provided properties. +Values left as \code{NULL} will remain unchanged in this LocalWebservice.} +\usage{ +update_local_webservice(webservice, models = NULL, + deployment_config = NULL, wait = FALSE, inference_config = NULL) +} +\arguments{ +\item{webservice}{LocalWebservice object.} + +\item{models}{A new list of models contained in the LocalWebservice.} + +\item{deployment_config}{Deployment configuration options to apply to the +LocalWebservice.} + +\item{wait}{Wait for the service's container to reach a healthy state.} + +\item{inference_config}{An InferenceConfig object used to provide the +required model deployment properties.} +} +\description{ +Update the LocalWebservice with provided properties. +Values left as \code{NULL} will remain unchanged in this LocalWebservice. +} diff --git a/man/upload_files_to_datastore.Rd b/man/upload_files_to_datastore.Rd new file mode 100644 index 00000000..b3852379 --- /dev/null +++ b/man/upload_files_to_datastore.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{upload_files_to_datastore} +\alias{upload_files_to_datastore} +\title{Upload files to the Azure storage a datastore points to} +\usage{ +upload_files_to_datastore(datastore, files, relative_root = NULL, + target_path = NULL, overwrite = FALSE, show_progress = TRUE) +} +\arguments{ +\item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} + +\item{files}{A list of strings of the absolute path to files to upload.} + +\item{relative_root}{A string of the base path used to +determine the path of the files in the Azure storage. For example, if +we upload \code{/path/to/file.txt}, and we define the base path to be \code{/path}, +when \code{file.txt} is uploaded to the blob storage or file share, it will +have the path of \code{/to/file.txt}. If \code{target_path} is also given, then it +will be used as the prefix for the derived path from above. The base path +must be a common path of all of the files, otherwise an exception will be +thrown.} + +\item{target_path}{A string of the location in the blob container or file +share to upload the data to. Defaults to \code{NULL}, in which case the data is +uploaded to the root.} + +\item{overwrite}{If \code{TRUE}, overwrites any existing data at \code{target_path}.} + +\item{show_progress}{If \code{TRUE}, show progress of upload in the console.} +} +\value{ +The \code{DataReference} object for the target path uploaded. +} +\description{ +Upload the data from the local file system to the Azure storage that the +datastore points to.
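+} + +\section{Examples}{ + +The following is a hypothetical sketch; the file paths are placeholders, and +\code{ds} can be any registered datastore (here the workspace's default +datastore, retrieved with \code{get_default_datastore()}).\preformatted{ws <- load_workspace_from_config() +ds <- get_default_datastore(ws) +upload_files_to_datastore(ds, + files = list("./data/train.csv"), + target_path = "training-data") +} +}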
diff --git a/man/upload_to_datastore.Rd b/man/upload_to_datastore.Rd new file mode 100644 index 00000000..0c95c725 --- /dev/null +++ b/man/upload_to_datastore.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/datastore.R +\name{upload_to_datastore} +\alias{upload_to_datastore} +\title{Upload a local directory to the Azure storage a datastore points to} +\usage{ +upload_to_datastore(datastore, src_dir, target_path = NULL, + overwrite = FALSE, show_progress = TRUE) +} +\arguments{ +\item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} + +\item{src_dir}{A string of the local directory to upload.} + +\item{target_path}{A string of the location in the blob container or +file share to upload the data to. Defaults to \code{NULL}, in which case the data +is uploaded to the root.} + +\item{overwrite}{If \code{TRUE}, overwrites any existing data at \code{target_path}.} + +\item{show_progress}{If \code{TRUE}, show progress of upload in the console.} +} +\value{ +The \code{DataReference} object for the target path uploaded. +} +\description{ +Upload a local directory to the Azure storage the datastore points to. +} diff --git a/man/view_run_details.Rd b/man/view_run_details.Rd new file mode 100644 index 00000000..72309235 --- /dev/null +++ b/man/view_run_details.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{view_run_details} +\alias{view_run_details} +\title{Plot a table of run details in the Viewer} +\usage{ +view_run_details(run) +} +\arguments{ +\item{run}{The \code{Run} object whose details will be plotted.} +} +\description{ +Plot a table of run details in the Viewer. +} diff --git a/man/wait_for_deployment.Rd b/man/wait_for_deployment.Rd new file mode 100644 index 00000000..28b5164c --- /dev/null +++ b/man/wait_for_deployment.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/webservice.R +\name{wait_for_deployment} +\alias{wait_for_deployment} +\title{Automatically poll on the running Webservice deployment.} +\usage{ +wait_for_deployment(webservice, show_output = FALSE) +} +\arguments{ +\item{webservice}{The webservice object.} + +\item{show_output}{If \code{TRUE}, print more verbose output.} +} +\description{ +Automatically poll on the running Webservice deployment. +} diff --git a/man/wait_for_model_package_creation.Rd b/man/wait_for_model_package_creation.Rd new file mode 100644 index 00000000..74063057 --- /dev/null +++ b/man/wait_for_model_package_creation.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{wait_for_model_package_creation} +\alias{wait_for_model_package_creation} +\title{Wait for the package to finish creating.} +\usage{ +wait_for_model_package_creation(package, show_output = FALSE) +} +\arguments{ +\item{package}{Package created with model(s) and dependencies.} + +\item{show_output}{Boolean option to print more verbose output. Defaults to +FALSE.} +} +\description{ +Wait for the package to finish creating.
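+} + +\section{Examples}{ + +The following is a hypothetical sketch; it assumes a registered \code{model} +and an \code{InferenceConfig} object \code{config} already exist.\preformatted{ws <- load_workspace_from_config() +package <- package_model(ws, + models = c(model), + inference_config = config) +wait_for_model_package_creation(package, show_output = TRUE) +} +}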
diff --git a/man/wait_for_provisioning_completion.Rd b/man/wait_for_provisioning_completion.Rd new file mode 100644 index 00000000..653dc883 --- /dev/null +++ b/man/wait_for_provisioning_completion.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/compute.R +\name{wait_for_provisioning_completion} +\alias{wait_for_provisioning_completion} +\title{Wait for a cluster to finish provisioning} +\usage{ +wait_for_provisioning_completion(cluster, show_output = FALSE) +} +\arguments{ +\item{cluster}{The \code{AmlCompute} or \code{AksCompute} object.} + +\item{show_output}{If \code{TRUE}, more verbose output will be provided.} +} +\description{ +Wait for a cluster to finish provisioning. Typically invoked after a +\code{create_aml_compute()} or \code{create_aks_compute()} call. +} +\section{Examples}{ + +Wait for an AmlCompute cluster to finish provisioning.\preformatted{ws <- load_workspace_from_config() +compute_target <- create_aml_compute(ws, + cluster_name = 'mycluster', + vm_size = 'STANDARD_D2_V2', + max_nodes = 1) +wait_for_provisioning_completion(compute_target) +} +} + +\seealso{ +\code{create_aml_compute()}, \code{create_aks_compute()} +} diff --git a/man/wait_for_run_completion.Rd b/man/wait_for_run_completion.Rd new file mode 100644 index 00000000..ae8a219d --- /dev/null +++ b/man/wait_for_run_completion.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run.R +\name{wait_for_run_completion} +\alias{wait_for_run_completion} +\title{Wait for the completion of this run} +\usage{ +wait_for_run_completion(run, show_output = TRUE) +} +\arguments{ +\item{run}{The \code{Run} object.} + +\item{show_output}{If \code{TRUE}, print verbose output to the console.} +} +\description{ +Wait for the completion of this run. +} diff --git a/man/write_workspace_config.Rd b/man/write_workspace_config.Rd new file mode 100644 index 00000000..068d96f1 --- /dev/null +++ b/man/write_workspace_config.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{write_workspace_config} +\alias{write_workspace_config} +\title{Write out the workspace configuration details to a config file} +\usage{ +write_workspace_config(workspace, path = NULL, file_name = NULL) +} +\arguments{ +\item{workspace}{The \code{Workspace} object whose configuration will be written +out.} + +\item{path}{A string of the location to write the config.json file. +The parameter defaults to the current working directory.} + +\item{file_name}{A string of the name to use for the config file. The +parameter defaults to \code{'config.json'}.} +} +\description{ +Write out the workspace ARM properties to a config file. The properties can +be loaded later with \code{load_workspace_from_config()}, which provides a simple +way of reusing the same workspace across multiple files or projects without +retyping the workspace ARM properties. +} diff --git a/samples/README.md b/samples/README.md new file mode 100644 index 00000000..42468592 --- /dev/null +++ b/samples/README.md @@ -0,0 +1,21 @@ +## Using basic AzureML APIs + +Before running the samples in RStudio, change to the sample directory in RStudio using `setwd(dirname)`. +The examples assume that the data and scripts are in the current directory. + +1.
[setup.R](setup.R): Set up the workspace before running the samples. +2. [Train on ML Compute](training/train-on-amlcompute): Train a model using an ML Compute cluster as the compute target. +3. [Deploy a model to the cloud](deployment/deploy-to-cloud): Deploy a model as a Webservice. + +### Troubleshooting + +- If the following error occurs when submitting an experiment using RStudio: + ```R + Error in py_call_impl(callable, dots$args, dots$keywords) : + PermissionError: [Errno 13] Permission denied + ``` + Move the files for your project into a subdirectory and reset the working directory to that directory before re-submitting. + + In order to submit an experiment, the AzureML SDK must create a .zip file of the project directory to send to the service. However, + the SDK does not have permission to write into the .Rproj.user subdirectory that is automatically created during an RStudio + session. For this reason, best practice is to isolate project files into their own directory. \ No newline at end of file diff --git a/samples/deployment/deploy-to-cloud/deploy-to-cloud.R b/samples/deployment/deploy-to-cloud/deploy-to-cloud.R new file mode 100644 index 00000000..65b18e57 --- /dev/null +++ b/samples/deployment/deploy-to-cloud/deploy-to-cloud.R @@ -0,0 +1,53 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT license. + +library("azureml") +library("jsonlite") + +ws <- load_workspace_from_config() + +# register the model +model <- register_model(ws, model_path = "model.rds", model_name = "model.rds") +r_env <- r_environment(name = "r_env") + +# create inference config +inference_config <- inference_config( + entry_script = "score.R", + source_directory = ".", + environment = r_env) + +# create ACI deployment config +deployment_config <- aci_webservice_deployment_config(cpu_cores = 1, + memory_gb = 1) + +# deploy the webservice +service <- deploy_model(ws, + 'rservice', + list(model), + inference_config, + deployment_config) +wait_for_deployment(service, show_output = TRUE) + +# If you encounter any issue in deploying the webservice, please visit +# https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-troubleshoot-deployment + +# Inferencing +# The three data frames below are alternative sample inputs; each assignment +# overwrites `plant`, so only the last one evaluated is sent for scoring. +# versicolor +plant <- data.frame(Sepal.Length = 6.4, + Sepal.Width = 2.8, + Petal.Length = 4.6, + Petal.Width = 1.8) +# setosa +plant <- data.frame(Sepal.Length = 5.1, + Sepal.Width = 3.5, + Petal.Length = 1.4, + Petal.Width = 0.2) +# virginica +plant <- data.frame(Sepal.Length = 6.7, + Sepal.Width = 3.3, + Petal.Length = 5.2, + Petal.Width = 2.3) + +predicted_val <- invoke_webservice(service, toJSON(plant)) +predicted_val + diff --git a/samples/deployment/deploy-to-cloud/model.rds b/samples/deployment/deploy-to-cloud/model.rds new file mode 100644 index 00000000..9e46c2fe Binary files /dev/null and b/samples/deployment/deploy-to-cloud/model.rds differ diff --git a/samples/deployment/deploy-to-cloud/score.R b/samples/deployment/deploy-to-cloud/score.R new file mode 100644 index 00000000..d78a0c10 --- /dev/null +++ b/samples/deployment/deploy-to-cloud/score.R @@ -0,0 +1,19 @@ +#' Copyright(c) Microsoft Corporation. +#' Licensed under the MIT license.
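+#' +#' Scoring entry script: init() runs once when the service container starts. +#' It loads the model from the directory given by AZUREML_MODEL_DIR and +#' returns the function that is invoked on each request with the request +#' body as JSON.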
+ +library(jsonlite) + +init <- function() +{ + model_path <- Sys.getenv("AZUREML_MODEL_DIR") + model <- readRDS(file.path(model_path, "model.rds")) + message("model is loaded") + + function(data) + { + plant <- as.data.frame(fromJSON(data)) + prediction <- predict(model, plant) + result <- as.character(prediction) + toJSON(result) + } +} \ No newline at end of file diff --git a/samples/setup.R b/samples/setup.R new file mode 100644 index 00000000..79e61492 --- /dev/null +++ b/samples/setup.R @@ -0,0 +1,20 @@ +# skip the lines before `subscription_id <- ...` if the azureml R package and +# its Python SDK are already installed. +devtools::install_github('https://github.com/Azure/azureml-sdk-for-r') + +library(azureml) +install_azureml() + +subscription_id <- Sys.getenv("SUBSCRIPTION_ID", unset = "") +resource_group <- Sys.getenv("RESOURCE_GROUP", unset = "") +workspace_name <- Sys.getenv("WORKSPACE_NAME", unset = "") +location <- Sys.getenv("WORKSPACE_REGION", unset = "") + +ws <- create_workspace(name = workspace_name, + subscription_id = subscription_id, + resource_group = resource_group, + location = location, + create_resource_group = TRUE, + exist_ok = TRUE) +write_workspace_config(ws, path = '.') + diff --git a/samples/training/mlflow/azureml_mlflow.R b/samples/training/mlflow/azureml_mlflow.R new file mode 100644 index 00000000..2ea0d01d --- /dev/null +++ b/samples/training/mlflow/azureml_mlflow.R @@ -0,0 +1,70 @@ +install.packages('AzureAuth') +install.packages('promises') +install.packages('future') + +library(future) +library(promises) + +library(AzureAuth) +library(mlflow) +# install python package for mlflow +install_mlflow() + + +get_tracking_uri <- function(region, + subscription_id, + resource_group, + workspace_name) { + sprintf(paste0('https://%s.experiments.azureml.net/history/v1.0/', + 'subscriptions/%s/resourceGroups/%s/providers/', + 'Microsoft.MachineLearningServices/workspaces/%s'), + region, + subscription_id, + resource_group, + workspace_name) +} + +tenant_id <- Sys.getenv('TENANT_ID', unset = '') +client_id <- Sys.getenv('CLIENT_ID', unset = '') +region <- Sys.getenv('REGION', unset = '') + +test_that("submit experiment through a custom environment", { + ws <- existing_ws + + # start a remote job and get the run, wait for it to finish + tmp_dir_name <- "tmp_dir" + script_name <- "train_dummy.R" + dir.create(tmp_dir_name) + file.copy(script_name, tmp_dir_name) + + env <- r_environment("myenv", cran_packages = c("ggplot2", "dplyr")) + + est <- estimator(tmp_dir_name, + compute_target = existing_compute$name, + entry_script = script_name, + environment = env) + + exp <- experiment(ws, "estimator_run") + run <- submit_experiment(exp, est) + wait_for_run_completion(run, show_output = TRUE) + expect_equal(run$status, "Completed") + + # tear down resources + unlink(tmp_dir_name, recursive = TRUE) +}) \ No newline at end of file diff --git a/tests/testthat/test_hyperdrive.R b/tests/testthat/test_hyperdrive.R new file mode 100644 index 00000000..13ef1218 --- /dev/null +++ b/tests/testthat/test_hyperdrive.R @@ -0,0 +1,62 @@ +context("hyperdrive") + +test_that("create hyperdrive config, launch runs, get run metrics", { + experiment_name <- "test_experiment" + + ws <- existing_ws + + # get offline run + run <- get_current_run() + + # create experiment + exp <- experiment(ws, experiment_name) + expect_equal(exp$name, experiment_name)
+ + # get existing experiment + exp <- experiment(ws, experiment_name) + expect_equal(exp$name, experiment_name) + + # start a remote job and get the run, wait for it to finish + tmp_dir_name <- "tmp_dir" + script_name <- "train_hyperdrive_dummy.R" + + dir.create(tmp_dir_name) + file.copy(script_name, tmp_dir_name) + + script_params <- list(number_1 = 3, number_2 = 2) + est <- estimator(source_directory = tmp_dir_name, + entry_script = script_name, + compute_target = existing_compute$name, + script_params = script_params) + + # define sampling and policy for hyperparameter tuning + sampling <- + grid_parameter_sampling(list(number_1 = choice(c(3, 6)), + number_2 = choice(c(2, 5)))) + policy <- median_stopping_policy() + hyperdrive_config <- + hyperdrive_config(sampling, "Sum", + primary_metric_goal("MAXIMIZE"), + 4, + policy = policy, + estimator = est) + # submit hyperdrive run + hyperdrive_run <- submit_experiment(exp, hyperdrive_config) + wait_for_run_completion(hyperdrive_run, show_output = TRUE) + + child_runs <- + get_child_runs_sorted_by_primary_metric(hyperdrive_run) + expected_best_run <- toString(child_runs[[1]][1]) + expect_equal(length(child_runs), 5) + + child_run_metrics <- get_child_run_metrics(hyperdrive_run) + expect_equal(length(child_run_metrics), 4) + + # find best-performing run + best_run <- get_best_run_by_primary_metric(hyperdrive_run) + + expect_equal(expected_best_run, best_run$id) + + # tear down resources + unlink(tmp_dir_name, recursive = TRUE) + }) \ No newline at end of file diff --git a/tests/testthat/test_install_azureml.R b/tests/testthat/test_install_azureml.R new file mode 100644 index 00000000..56aaee04 --- /dev/null +++ b/tests/testthat/test_install_azureml.R @@ -0,0 +1,6 @@ +context("install azureml") + +test_that("install_azureml", { + install_azureml(envname = test_env) + expect_true(TRUE) +}) \ No newline at end of file diff --git a/tests/testthat/test_keyvault.R b/tests/testthat/test_keyvault.R new file mode 100644 index 00000000..63448a4a --- /dev/null +++ b/tests/testthat/test_keyvault.R @@ -0,0 +1,21 @@ +context("keyvault") + +test_that("keyvault tests, list/set/get/delete secrets", +{ + ws <- existing_ws + kv <- get_default_keyvault(ws) + expect_gte(length(list_secrets(kv)), 0) + + secret_name <- paste0("secret", gsub("-", "", build_num)) + secret_value <- paste0("value", gsub("-", "", build_num)) + secrets <- list() + secrets[[ secret_name ]] <- secret_value + + set_secrets(kv, secrets) + expect_equal(get_secrets(kv, list(secret_name))[[ secret_name ]], + secret_value) + + delete_secrets(kv, list(secret_name)) + expect_equal(get_secrets(kv, list(secret_name))[[ secret_name ]], + NULL) +}) \ No newline at end of file diff --git a/tests/testthat/test_model.R b/tests/testthat/test_model.R new file mode 100644 index 00000000..d0a7e7fe --- /dev/null +++ b/tests/testthat/test_model.R @@ -0,0 +1,79 @@ +context("model tests") + +test_that("get, register, download, serialize, deserialize and delete model", { + ws <- existing_ws + + tmp_dir_name <- "tmp_dir" + model_name <- "dummy_model.data" + dir.create(tmp_dir_name) + file.create(file.path(tmp_dir_name, model_name)) + + # register the model + model <- register_model(ws, tmp_dir_name, model_name) + + # get model + ws_model <- get_model(ws, model_name) + + expect_equal(model_name, ws_model$name) + + # download model + download_dir <- "downloaded" + dir.create(download_dir) + path <- download_model(model, download_dir) + expect_equal(file.exists(file.path(download_dir, tmp_dir_name, model_name)), + 
TRUE) + + # serialize and deserialize model + model_payload <- serialize_model(model) + deserialized_model <- deserialize_to_model(ws, model_payload) + + # delete the model + delete_model(model) +}) + +test_that("create, check container registry and save model package", { + ws <- existing_ws + + tmp_dir_name <- "tmp_dir" + model_name <- "dummy_model.data" + dir.create(tmp_dir_name) + file.create(file.path(tmp_dir_name, model_name)) + + # register the model + model <- register_model(ws, tmp_dir_name, model_name) + + env <- azureml$core$Environment(name = "newenv") + env$register(ws) + + config <- inference_config(entry_script = "dummy_score.py", + environment = env) + + # Create ModelPackage with dockerfile + model_package <- package_model(ws, + c(model), + config, + generate_dockerfile = TRUE) + + # wait for the package to be created + wait_for_model_package_creation(model_package, show_output = TRUE) + + # Check package container registry + cr <- get_model_package_container_registry(model_package) + env_image_details <- env$get_image_details(ws) + expect_equal(cr$address, env_image_details$dockerImage$registry$address) + expect_equal(cr$username, env_image_details$dockerImage$registry$username) + + # save package files locally + save_model_package_files(model_package, + output_directory = "downloaded_package") + expect_equal(file.exists(file.path("downloaded_package", "Dockerfile")), TRUE) + expect_equal(file.exists(file.path("downloaded_package", + "model_config_map.json")), TRUE) + + # Create ModelPackage without dockerfile + model_package <- package_model(ws, c(model), config) + + # wait for the package to be created + wait_for_model_package_creation(model_package, show_output = TRUE) + pull_model_package_image(model_package) +}) diff --git a/tests/testthat/test_webservice.R b/tests/testthat/test_webservice.R new file mode 100644 index 00000000..92df7c9d --- /dev/null +++ b/tests/testthat/test_webservice.R @@ -0,0 +1,60 @@ +context("webservice tests") + +test_that("create, get, generate keys of, and delete webservice", { + skip('skip') + ws <- existing_ws + + tmp_dir_name <- "tmp_dir" + model_name <- "dummy_model.data" + dir.create(tmp_dir_name) + file.create(file.path(tmp_dir_name, model_name)) + + # register the model + model <- register_model(ws, tmp_dir_name, model_name) + + # Create a new environment + env <- r_environment(name = "newenv") + env$register(ws) + + # Create the inference config to use for Webservice + config <- inference_config(entry_script = "dummy_score.R", environment = env) + + # Create ACI deployment config + tags <- reticulate::py_dict('name', 'temp') + aciconfig <- + azureml$core$webservice$AciWebservice$deploy_configuration(cpu_cores = 1, + memory_gb = 1, + tags = tags, + auth_enabled = T) + # Deploy the model + service_name <- paste("svc", build_num, sep="") + service <- deploy_model(ws, + service_name, + models = c(model), + inference_config = config, + deployment_config = aciconfig) + + wait_for_deployment(service, show_output = TRUE) + + # Get webservice + service <- get_webservice(ws, name = service_name) + + # Check the logs + logs <- get_webservice_logs(service) + expect_equal(length(logs), 1) + + # Get the service keys + keys <- get_webservice_keys(service) + expect_equal(length(keys), 2) + + # Try changing secondary key + generate_new_webservice_key(service, key_type = 'Secondary') + new_keys <- get_webservice_keys(service) + expect_equal(length(new_keys), 2) + + # check if the new secondary key is different from the previous one + 
expect_false(keys[[2]] == new_keys[[2]]) + + # delete the webservice + delete_webservice(service) +}) \ No newline at end of file diff --git a/tests/testthat/test_workspace.R b/tests/testthat/test_workspace.R new file mode 100644 index 00000000..56573bde --- /dev/null +++ b/tests/testthat/test_workspace.R @@ -0,0 +1,37 @@ +context("Workspace") + +subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID") +resource_group <- Sys.getenv("TEST_RESOURCE_GROUP") +location <- Sys.getenv("TEST_LOCATION") + +test_that("create, get, save, load and delete workspace", { + # create workspace + workspace_name <- paste0("test_ws", build_num) + existing_ws <- create_workspace(workspace_name, + subscription_id = subscription_id, + resource_group = resource_group, + location = location) + + # retrieve workspace + ws <- get_workspace(workspace_name, + subscription_id = subscription_id, + resource_group = resource_group) + expect_equal(ws$name, existing_ws$name) + get_workspace_details(ws) + kv <- get_default_keyvault(ws) + expect_equal(length(kv$list_secrets()), 0) + + # write config + write_workspace_config(existing_ws) + + # load from config + loaded_ws <- load_workspace_from_config(".") + expect_equal(loaded_ws$name, workspace_name) + + # delete workspace + delete_workspace(existing_ws) + + # negative testing + ws <- get_workspace("random", subscription_id = subscription_id) + expect_equal(ws, NULL) +}) diff --git a/tests/testthat/train_dummy.R b/tests/testthat/train_dummy.R new file mode 100644 index 00000000..18848bf3 --- /dev/null +++ b/tests/testthat/train_dummy.R @@ -0,0 +1,22 @@ +# check if ggplot2 and dplyr are already installed +library("ggplot2") +library("dplyr") + +library(azureml) + +log_metric_to_run("test_metric", 0.5, get_current_run()) + +# Uncomment once base image upgrades the sdk +#log_list_to_run("test_list", c(1, 2, 3)) +#log_row_to_run("test_row", x = 1, y = 2) +#predict_json <- '{ +# "schema_type": "predictions", +# "schema_version": "v1", +# "data": { +# "bin_averages": [0.25, 0.75], +# "bin_errors": [0.013, 0.042], +# "bin_counts": [56, 34], +# "bin_edges": [0.0, 0.5, 1.0] +# } +# }' +#log_predictions_to_run("test_predictions", predict_json) \ No newline at end of file diff --git a/tests/testthat/train_hyperdrive_dummy.R b/tests/testthat/train_hyperdrive_dummy.R new file mode 100644 index 00000000..61eb2205 --- /dev/null +++ b/tests/testthat/train_hyperdrive_dummy.R @@ -0,0 +1,12 @@ +library(azureml) + +run <- get_current_run() + +args <- commandArgs(trailingOnly = TRUE) +number_1 <- args[2] +log_metric_to_run("First Number", number_1, run) +number_2 <- args[4] +log_metric_to_run("Second Number", number_2, run) + +sum <- as.numeric(number_1) + as.numeric(number_2) +log_metric_to_run("Sum", sum, run) \ No newline at end of file diff --git a/vignettes/cnn-tuning-with-hyperdrive/cifar10_cnn.R b/vignettes/cnn-tuning-with-hyperdrive/cifar10_cnn.R new file mode 100644 index 00000000..a782ab20 --- /dev/null +++ b/vignettes/cnn-tuning-with-hyperdrive/cifar10_cnn.R @@ -0,0 +1,126 @@ +#' Modified from: "https://github.com/rstudio/keras/blob/master/vignettes/ +#' examples/cifar10_cnn.R" +#' +#' Train a simple deep CNN on the CIFAR10 small images dataset. +#' +#' It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 +#' epochs, though it is still underfitting at that point. 
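+#' +#' Note: the hyperparameters are passed to this script as command-line +#' arguments of the form "--name value" (one pair per hyperparameter), so +#' the values live at the even indices of commandArgs(trailingOnly = TRUE).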
+ +library("keras") +install_keras() + +library(azureml) + +# Parameters -------------------------------------------------------------- + +args <- commandArgs(trailingOnly = TRUE) + +current_run <- get_current_run() + +batch_size <- as.numeric(args[2]) +log_metric_to_run("batch_size", batch_size, current_run) + +epochs <- as.numeric(args[4]) +log_metric_to_run("epochs", epochs, current_run) + +lr <- as.numeric(args[6]) +log_metric_to_run("lr", lr, current_run) + +decay <- as.numeric(args[8]) +log_metric_to_run("decay", decay, current_run) + +data_augmentation <- TRUE + + +# Data Preparation -------------------------------------------------------- + +# See ?dataset_cifar10 for more info +cifar10 <- dataset_cifar10() + +# Feature scale RGB values in test and train inputs +x_train <- cifar10$train$x/255 +x_test <- cifar10$test$x/255 +y_train <- to_categorical(cifar10$train$y, num_classes = 10) +y_test <- to_categorical(cifar10$test$y, num_classes = 10) + + +# Defining Model ---------------------------------------------------------- + +# Initialize sequential model +model <- keras_model_sequential() + +model %>% + + # Start with hidden 2D convolutional layer being fed 32x32 pixel images + layer_conv_2d( + filter = 32, kernel_size = c(3,3), padding = "same", + input_shape = c(32, 32, 3) + ) %>% + layer_activation("relu") %>% + + # Second hidden layer + layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>% + layer_activation("relu") %>% + + # Use max pooling + layer_max_pooling_2d(pool_size = c(2, 2)) %>% + layer_dropout(0.25) %>% + + # 2 additional hidden 2D convolutional layers + layer_conv_2d(filter = 32, kernel_size = c(3, 3), padding = "same") %>% + layer_activation("relu") %>% + layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>% + layer_activation("relu") %>% + + # Use max pooling once more + layer_max_pooling_2d(pool_size = c(2, 2)) %>% + layer_dropout(0.25) %>% + + # Flatten max filtered output into feature vector + # and feed into dense layer + layer_flatten() %>% + layer_dense(512) %>% + layer_activation("relu") %>% + layer_dropout(0.5) %>% + + # Outputs from dense layer are projected onto 10 unit output layer + layer_dense(10) %>% + layer_activation("softmax") + +# Use named arguments: passed positionally, decay would bind to rho +opt <- optimizer_rmsprop(lr = lr, decay = decay) + +model %>% + compile(loss = "categorical_crossentropy", + optimizer = opt, + metrics = "accuracy" +) + + +# Training ---------------------------------------------------------------- + +if (!data_augmentation){ + + model %>% + fit(x_train, + y_train, + batch_size = batch_size, + epochs = epochs, + validation_data = list(x_test, y_test), + shuffle = TRUE + ) + +} else { + + datagen <- image_data_generator(rotation_range = 20, + width_shift_range = 0.2, + height_shift_range = 0.2, + horizontal_flip = TRUE + ) + + datagen %>% fit_image_data_generator(x_train) + + results <- evaluate(model, x_train, y_train, batch_size) + log_metric_to_run("Loss", results[[1]], current_run) + cat("Loss: ", results[[1]], "\n") + cat("Accuracy: ", results[[2]], "\n") +} \ No newline at end of file diff --git a/vignettes/cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.Rmd b/vignettes/cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.Rmd new file mode 100644 index 00000000..c89e5980 --- /dev/null +++ b/vignettes/cnn-tuning-with-hyperdrive/cnn-tuning-with-hyperdrive.Rmd @@ -0,0 +1,195 @@ +--- +title: "Hyperparameter Tuning a Keras Model with HyperDrive" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Hyperparameter Tuning a Keras Model with HyperDrive}
%\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +This article demonstrates how you can efficiently tune hyperparameters for a model using the AzureML SDK for R. +We will train a Keras model on the CIFAR10 dataset, automate hyperparameter exploration, launch parallel jobs, log our results, and find the best run using AzureML's HyperDrive service. + +### What are hyperparameters? + +Hyperparameters are adjustable parameters, chosen before training, that govern the training process. Learning rate, number of epochs, and batch size are all examples of hyperparameters. + +Using brute-force methods to find the optimal values for parameters can be time-consuming, and poor-performing runs can result in wasted money. To avoid this, HyperDrive automates hyperparameter exploration in a time-saving and cost-effective manner by launching several parallel runs with different configurations and finding the configuration that results in the best performance on your primary metric. + +Let's get started with the example to see how it works! + +## 1. Set up the experiment + +First, we will prepare for training by loading the required package, initializing a workspace, and creating an experiment. + +### Import package + +```{r Import package, eval=FALSE} +library("azureml") +``` + +### Initialize a workspace + +The `Workspace` is the top-level resource for the service. It provides us with a centralized place to work with all the artifacts we will create. + +You can create a `Workspace` object from a local `config.json` file +```{r Initialize a workspace, eval=FALSE} +ws <- load_workspace_from_config() +``` + +Or load an existing workspace from your Azure Machine Learning account +```{r Load a workspace, eval=FALSE} +ws <- get_workspace("", "", "") +``` + +### Create a deep learning experiment + +For this example, we will create an experiment named "hyperdrive-cifar10". + +```{r Create a deep learning experiment, eval=FALSE} +exp <- experiment(workspace = ws, name = 'hyperdrive-cifar10') +``` + +## 2. Create a compute target + +Now, we will create a compute target for our job to run on. In this example, we are creating a GPU-enabled compute cluster. +```{r Create a compute target, eval=FALSE} +cluster_name <- "rcluster" +compute_target <- get_compute(ws, cluster_name = cluster_name) +if (is.null(compute_target)) +{ + vm_size <- "STANDARD_NC6" + compute_target <- create_aml_compute(workspace = ws, cluster_name = cluster_name, + vm_size = vm_size, max_nodes = 1) +} +wait_for_provisioning_completion(compute_target) +``` + +## 3. Prepare training script + +In order to collect and upload run metrics, we need to import the `azureml` package at the top of our training script, ["cifar10_cnn.R"](cifar10_cnn.R). + +```r +library("azureml") +``` + +Then, we need to edit our script so that it can log our parameters. We will use the `log_metric_to_run` function to log our hyperparameters at the top of the script and to log our primary metric at the bottom of the script. + +```r +current_run <- get_current_run() +... +log_metric_to_run("batch_size", batch_size, current_run) +... +log_metric_to_run("epochs", epochs, current_run) +... +log_metric_to_run("lr", lr, current_run) +... +log_metric_to_run("decay", decay, current_run) +``` + +```r +log_metric_to_run("Loss", results[[1]], current_run) +``` + +## 4. Create an estimator + +An `Estimator` offers a simple way to launch a training job on a compute target.
+ +Our training script will need the Keras package to run, and we can have it installed in the Docker container where our job will run by passing the package name via the `cran_packages` parameter. +```{r Create an estimator, eval=FALSE} +est <- estimator(source_directory = ".", entry_script = "cifar10_cnn.R", + compute_target = compute_target, cran_packages = c("keras")) +``` + +## 5. Set HyperDrive configuration + +### Define search space + +In this experiment, we will use four hyperparameters: batch size, number of epochs, learning rate, and decay. In order to begin tuning, we must define the range of values we would like to pull from and how they will be distributed. This is called a parameter space definition and can be created with discrete or continuous ranges. + +__Discrete hyperparameters__ are specified as a choice among discrete values represented as a list. + +Advanced discrete hyperparameters can also be specified using a distribution. The following distributions are supported: + + * `quniform(low, high, q)` + * `qloguniform(low, high, q)` + * `qnormal(mu, sigma, q)` + * `qlognormal(mu, sigma, q)` + +__Continuous hyperparameters__ are specified as a distribution over a continuous range of values. The following distributions are supported: + + * `uniform(low, high)` + * `loguniform(low, high)` + * `normal(mu, sigma)` + * `lognormal(mu, sigma)` + +Here, we will use the `random_parameter_sampling` function to define the search space for each hyperparameter. `batch_size` and `epochs` will be chosen from discrete sets while `lr` and `decay` will be drawn from continuous distributions. + +Other sampling function options are: + + * `grid_parameter_sampling(parameter_space)` + * `bayesian_parameter_sampling(parameter_space)` + +```{r Define search space, eval=FALSE} +sampling <- random_parameter_sampling(list(batch_size = choice(c(16, 32, 64)), + epochs = choice(c(200, 350, 500)), + lr = normal(0.0001, 0.005), + decay = uniform(1e-6, 3e-6))) +``` + +### Define termination policy + +To prevent resource waste, we should detect and terminate poorly performing runs. HyperDrive will do this automatically if we set up an early termination policy. + +Here, we will use the `bandit_policy`, which terminates any runs where the primary metric is not within the specified slack factor with respect to the best performing training run. + +```{r Define termination policy, eval=FALSE} +policy <- bandit_policy(slack_factor = 0.15) +``` + +Other termination policy options are: + + * `median_stopping_policy(evaluation_interval, delay_evaluation)` + * `truncation_selection_policy(truncation_percentage, evaluation_interval, delay_evaluation)` + +If no policy is provided, all runs will continue to completion regardless of performance. + +### Finalize configuration + +Now, we can create a `HyperDriveConfig` object to define our group of jobs. Along with our sampling and policy definitions, we need to specify the name of the primary metric that we want to track and whether we want to maximize it or minimize it. + +```{r Create Hyperdrive run configuration, eval=FALSE} +hyperdrive_config <- hyperdrive_config(hyperparameter_sampling = sampling, + primary_metric_name = "Loss", + primary_metric_goal = primary_metric_goal("MINIMIZE"), + max_total_runs = 4, + policy = policy, + estimator = est) +``` + +## 6. Submit HyperDrive run + +Submitting our experiment will start multiple simultaneous runs and return a `HyperDriveRun` object that we will use to interface with the run history during and after the job.
+### Finalize configuration
+
+Now, we can create a `HyperDriveConfig` object to define our group of jobs. Along with our sampling and policy definitions, we need to specify the name of the primary metric that we want to track and whether we want to maximize or minimize it.
+
+```{r Create Hyperdrive run configuration, eval=FALSE}
+hyperdrive_config <- hyperdrive_config(hyperparameter_sampling = sampling,
+                                       primary_metric_goal = primary_metric_goal("MINIMIZE"),
+                                       primary_metric_name = "Loss",
+                                       max_total_runs = 4,
+                                       policy = policy,
+                                       estimator = est)
+```
+
+## 6. Submit HyperDrive run
+
+Submitting our experiment will start multiple simultaneous runs and return a `HyperDriveRun` object that we will use to interface with the run history during and after the job.
+
+```{r Submit HyperDrive run, eval=FALSE}
+hyperdrive_run <- submit_experiment(exp, hyperdrive_config)
+wait_for_run_completion(hyperdrive_run, show_output = TRUE)
+```
+
+## 7. Analyse runs by performance
+
+Finally, we can view and compare the metrics collected across all of our child runs!
+
+```{r Analyse runs by performance, eval=FALSE}
+child_run_metrics <- get_child_run_metrics(hyperdrive_run)
+child_run_metrics
+
+child_runs <- get_child_runs_sorted_by_primary_metric(hyperdrive_run)
+child_runs
+
+best_run <- get_best_run_by_primary_metric(hyperdrive_run)
+
+metrics <- get_run_metrics(best_run)
+metrics
+```
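+
+Because the training script logged every hyperparameter in step 3, the metrics of the best run double as its winning configuration. The following sketch pulls out individual values; the element names match the `log_metric_to_run` calls made in the training script:
+
+```{r Inspect best configuration, eval=FALSE}
+# Sketch: get_run_metrics() returns a named list, so the logged
+# hyperparameters can be read alongside the final "Loss"
+best_run_metrics <- get_run_metrics(best_run)
+cat("Best loss:", best_run_metrics$Loss, "\n")
+cat("Learning rate:", best_run_metrics$lr, "\n")
+cat("Batch size:", best_run_metrics$batch_size, "\n")
+```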
diff --git a/vignettes/installation.Rmd b/vignettes/installation.Rmd
new file mode 100644
index 00000000..f61166da
--- /dev/null
+++ b/vignettes/installation.Rmd
@@ -0,0 +1,88 @@
+---
+title: "Installing AzureML SDK for R"
+date: "`r Sys.Date()`"
+output: rmarkdown::html_vignette
+vignette: >
+  %\VignetteIndexEntry{Installing AzureML SDK for R}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+---
+
+## 1. Install Anaconda
+
+If you do not have Anaconda already installed on your machine, you will first need to [install](https://www.anaconda.com/distribution/) it. Choose the 64-bit binary for Python 3.5 or later.
+
+## 2. Install `azureml` R package with `remotes`
+As the `azureml` package is not available on CRAN, you will need the `remotes` package to install it from the GitHub repo.
+``` {r Install Remotes, eval=FALSE}
+install.packages('remotes')
+```
+Then, you can use the `install_github` function to install the package. Until the current repo is opened to the public, you will need to use a [generated personal access token](https://github.com/settings/tokens). When generating the token, make sure to select the "repo" scope.
+
+``` {r Install Azureml, eval=FALSE}
+remotes::install_github('https://github.com/Azure/azureml-sdk-for-r',
+                        auth_token = '',
+                        INSTALL_opts = c("--no-multiarch"),
+                        build_vignettes = FALSE)
+```
+Set `build_vignettes` to `TRUE` if you would like the vignettes to be built and installed along with the package.
+
+## 3. Install AzureML Python SDK
+Lastly, use the `azureml` R package to install the AzureML Python SDK that the R functions are built on. By default, `install_azureml` will install the [latest version of the AzureML Python SDK](https://pypi.org/project/azureml-sdk/) in a conda environment called `r-azureml`. If you would like to override the default version, environment name, or Python version, you can pass in those arguments:
+``` {r Install AzureML Python SDK, eval=FALSE}
+azureml::install_azureml(version = NULL,
+                         envname = "",
+                         conda_python_version = "")
+```
+
+## 4. Test installation
+You can confirm that your installation worked by loading the library and successfully retrieving a run.
+``` {r Test installation, echo=TRUE}
+library(azureml)
+get_current_run()
+```
+
+## Troubleshooting
+- In step 3 of the installation, if you get SSL errors on Windows, they are caused by an
+outdated OpenSSL binary. Install the latest OpenSSL binaries from
+[here](https://wiki.openssl.org/index.php/Binaries).
+
+- If installation fails due to this error:
+
+  ```R
+  Error in strptime(xx, f, tz = tz) :
+    (converted from warning) unable to identify current timezone 'C':
+  please set environment variable 'TZ'
+  In R CMD INSTALL
+  Error in i.p(...) :
+    (converted from warning) installation of package ‘C:/.../azureml_0.4.0.tar.gz’ had non-zero exit
+    status
+  ```
+
+  you will need to set your time zone environment variable to GMT and restart the installation process.
+
+  ```R
+  Sys.setenv(TZ='GMT')
+  ```
+
+- If the following permission error occurs while installing in RStudio,
+  change your RStudio session to administrator mode, and re-run the installation command.
+
+  ```R
+  Downloading GitHub repo Azure/azureml-sdk-for-r@master
+  Skipping 2 packages ahead of CRAN: reticulate, rlang
+  Running `R CMD build`...
+
+  Error: (converted from warning) invalid package
+  'C:/.../file2b441bf23631'
+  In R CMD INSTALL
+  Error in i.p(...) :
+    (converted from warning) installation of package
+    ‘C:/.../file2b441bf23631’ had non-zero exit status
+  In addition: Warning messages:
+  1: In file(con, "r") :
+    cannot open file 'C:...\file2b44144a540f': Permission denied
+  2: In file(con, "r") :
+    cannot open file 'C:...\file2b4463c21577': Permission denied
+  ```
\ No newline at end of file
diff --git a/vignettes/train-with-tensorflow/tf_mnist.R b/vignettes/train-with-tensorflow/tf_mnist.R
new file mode 100644
index 00000000..2b8f1b25
--- /dev/null
+++ b/vignettes/train-with-tensorflow/tf_mnist.R
@@ -0,0 +1,66 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2016 RStudio, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+
+library("tensorflow")
+install_tensorflow(version = "1.13.2-gpu")
+
+library("azureml")
+
+# Create the model: a single softmax layer over the flattened 28x28 images
+x <- tf$placeholder(tf$float32, shape(NULL, 784L))
+W <- tf$Variable(tf$zeros(shape(784L, 10L)))
+b <- tf$Variable(tf$zeros(shape(10L)))
+
+y <- tf$nn$softmax(tf$matmul(x, W) + b)
+
+# Define loss and optimizer
+y_ <- tf$placeholder(tf$float32, shape(NULL, 10L))
+cross_entropy <- tf$reduce_mean(-tf$reduce_sum(y_ * log(y),
+                                               reduction_indices = 1L))
+train_step <- tf$train$GradientDescentOptimizer(0.5)$minimize(cross_entropy)
+
+# Create session and initialize variables
+sess <- tf$Session()
+sess$run(tf$global_variables_initializer())
+
+# Load MNIST data
+datasets <- tf$contrib$learn$datasets
+mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = TRUE)
+
+# Train
+for (i in 1:1000) {
+  batches <- mnist$train$next_batch(100L)
+  batch_xs <- batches[[1]]
+  batch_ys <- batches[[2]]
+  sess$run(train_step,
+           feed_dict = dict(x = batch_xs, y_ = batch_ys))
+}
+
+# Test trained model
+correct_prediction <- tf$equal(tf$argmax(y, 1L), tf$argmax(y_, 1L))
+accuracy <- tf$reduce_mean(tf$cast(correct_prediction, tf$float32))
+cat("Accuracy: ", sess$run(accuracy,
+                           feed_dict = dict(x = mnist$test$images,
+                                            y_ = mnist$test$labels)))
+
+# Log the test accuracy to the run record so AzureML can track it
+current_run <- get_current_run()
+log_metric_to_run("accuracy",
+                  sess$run(accuracy, feed_dict = dict(x = mnist$test$images,
+                                                      y_ = mnist$test$labels)),
+                  current_run)
diff --git a/vignettes/train-with-tensorflow/train-with-tensorflow.Rmd b/vignettes/train-with-tensorflow/train-with-tensorflow.Rmd
new file mode 100644
index 00000000..11501102
--- /dev/null
+++ b/vignettes/train-with-tensorflow/train-with-tensorflow.Rmd
@@ -0,0 +1,107 @@
+---
+title: "Training a TensorFlow Model on MNIST"
+date: "`r Sys.Date()`"
+output: rmarkdown::html_vignette
+vignette: >
+  %\VignetteIndexEntry{Training a TensorFlow Model on MNIST}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+---
+
+This article demonstrates how to run a TensorFlow training script at scale using the AzureML SDK for R. We will train a TensorFlow model to classify handwritten digits using a deep neural network (DNN) and log our results to the Azure Machine Learning service.
+
+## 1. Set up the experiment
+
+Let's prepare for training by loading the required package, initializing a workspace, and creating an experiment.
+
+### Import package
+```{r Import package, eval=FALSE}
+library("azureml")
+```
+
+### Initialize a workspace
+
+The `Workspace` is the top-level resource for the service. It provides us with a centralized place to work with all the artifacts we will create.
+
+You can create a `Workspace` object from a local `config.json` file:
+```{r Initialize a workspace, eval=FALSE}
+ws <- load_workspace_from_config()
+```
+
+Or load an existing workspace from your Azure Machine Learning account:
+```{r Load a workspace, eval=FALSE}
+ws <- get_workspace("", "", "")
+```
+
+### Create an experiment
+
+For this example, we will create an `Experiment` called "tf-mnist".
+
+```{r Create an experiment, eval=FALSE}
+exp <- experiment(workspace = ws, name = "tf-mnist")
+```
+
+## 2. Create a compute target
+
+Now, we will create a compute target for our TensorFlow job to run on. In this example, we are creating a CPU-based compute cluster.
+```{r Create a compute target, eval=FALSE}
+cluster_name <- "rcluster"
+
+compute_target <- get_compute(ws, cluster_name = cluster_name)
+if (is.null(compute_target)) {
+  vm_size <- "STANDARD_D2_V2"
+  compute_target <- create_aml_compute(workspace = ws, cluster_name = cluster_name,
+                                       vm_size = vm_size, max_nodes = 1)
+}
+
+wait_for_provisioning_completion(compute_target)
+```
+
+## 3. Prepare training script
+
+In order to collect and upload run metrics, we need to import the `azureml` package at the top of our training script, ["tf_mnist.R"](tf_mnist.R).
+
+```r
+library("azureml")
+```
+
+Then, we need to add a `log_metric_to_run` call to track our primary metric, "accuracy", for this experiment. If you have your own training script with several important metrics, simply create a logging call for each one within the script, as in the sketch after the following excerpt.
+
+```r
+current_run <- get_current_run()
+log_metric_to_run("accuracy",
+                  sess$run(accuracy,
+                           feed_dict = dict(x = mnist$test$images, y_ = mnist$test$labels)),
+                  current_run)
+```
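+
+For instance, the same script could also track the cross-entropy loss it already computes. This is a sketch only; the logging call below does not appear in ["tf_mnist.R"](tf_mnist.R), and the metric name is illustrative.
+
+```r
+# Sketch: each additional log_metric_to_run call records one more named
+# metric on the run; cross_entropy is already defined in the script
+log_metric_to_run("cross_entropy_loss",
+                  sess$run(cross_entropy,
+                           feed_dict = dict(x = mnist$test$images,
+                                            y_ = mnist$test$labels)),
+                  current_run)
+```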
MNIST" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Training a TensorFlow Model on MNIST} + %\VignetteEngine{knitr::rmarkdown} + \use_package{UTF-8} +--- + +This article demonstrates how to run a TensorFlow training script at scale using AzureML SDK for R. We will train a TensorFlow model to classify handwritten digits using a deep neural network (DNN) and log our results to the Azure Machine Learning service. + +## 1. Set up the experiment + +Let's prepare for training by loading the required package, initializing a workspace, and creating an experiment. + +### Import package +```{r Import package, eval=FALSE} +library("azureml") +``` + +### Initialize a workspace + +The `Workspace` is the top-level resource for the service. It provides us with a centralized place to work with all the artifacts we will create. + +You can create a `Workspace` object from a local `config.json` file +```{r load a workspace, eval=FALSE} +ws <- load_workspace_from_config() +``` + +Or load an existing workspace from your Azure Machine Learning account +```{r Initialize a workspace, eval=FALSE} +ws <- get_workspace("", "", "") +``` + +### Create an experiment + +For this example, we will create an `Experiment` called "tf-mnist". + +```{r Create an experiment, eval=FALSE} +exp <- experiment(workspace = ws, name = 'tf-mnist') +``` + +## 2. Create a compute target + +Now, we will create a compute target for our TensorFlow job to run on. In this example, we are creating a CPU-enabled compute cluster. +```{r Create a compute target, eval=FALSE} +cluster_name <- "rcluster" + +compute_target <- get_compute(ws, cluster_name = cluster_name) +if (is.null(compute_target)) +{ + vm_size <- "STANDARD_D2_V2" + compute_target <- create_aml_compute(workspace = ws, cluster_name = cluster_name, + vm_size = vm_size, max_nodes = 1) +} + +wait_for_provisioning_completion(compute_target) +``` + +## 3. Prepare training script + +In order to collect and upload run metrics, we need to import the `azureml` package at the top of our training script, ["tf_mnist.R"](tf_mnist.R). + +```r +library("azureml") +``` + +Then, we need to add the `log_metric_to_run` function to track our primary metric, "accuracy", for this experiment. If you have your own training script with several important metrics, simply create a logging call for each one within the script. + +```r +current_run <- get_current_run() +log_metric_to_run("accuracy", + sess$run(accuracy, + feed_dict = dict(x = mnist$test$images, y_ = mnist$test$labels)), + current_run) +``` + +## 4. Create an estimator + +An `Estimator` offers a simple way to launch a training job on a compute target. Our training script will need the TensorFlow package to run, and we can have it installed in the Docker container where our job will run by passing the package name to the `cran_packages` parameter. + +```{r Create an estimator, eval=FALSE} +est <- estimator(source_directory = ".", + entry_script = "tf_mnist.R", + compute_target = compute_target, + cran_packages = c("tensorflow"), + use_gpu = TRUE) +``` + +## 5. Submit a run + +Submitting our experiment will return a `Run` object that we will use to interface with the run history during and after the job. + +```{r Submit a run, eval=FALSE} +run <- submit_experiment(exp, est) +wait_for_run_completion(run, show_output = TRUE) +``` + +### 6. View metrics + +Finally, we can view the metrics collected during our TensorFlow run! + +```{r View metrics, eval=FALSE} +metrics <- get_run_metrics(run) +metrics +```