From fe2c0aefd2148ce96a9094d2707ddeca5187556e Mon Sep 17 00:00:00 2001
From: Serge Smertin <259697+nfx@users.noreply.github.com>
Date: Mon, 25 Mar 2024 21:36:33 +0100
Subject: [PATCH] (Breaking change) Moved long-running operations to `AndWait`
 methods (#35)

All long-running operations now require an explicit call to the corresponding
method with the `AndWait` suffix. A short migration sketch follows the diff.
---
 .codegen/service.R.tmpl | 36 +-
 .github/workflows/push.yaml | 36 +
 NAMESPACE | 22 +
 R/account_access_control_proxy.R | 13 +-
 R/alerts.R | 20 +-
 R/apps.R | 25 +-
 R/artifact_allowlists.R | 8 +-
 R/automatic_cluster_update.R | 7 +-
 R/catalogs.R | 22 +-
 R/clean_rooms.R | 22 +-
 R/cluster_policies.R | 42 +-
 R/clusters.R | 770 +++++++++++-------
 R/command_execution.R | 176 ++--
 R/connections.R | 20 +-
 R/credentials_manager.R | 3 -
 R/csp_enablement.R | 7 +-
 R/dashboard_widgets.R | 13 +-
 R/dashboards.R | 27 +-
 R/dbfs.R | 48 +-
 R/dbsql_permissions.R | 13 +-
 R/default_namespace.R | 11 +-
 R/esm_enablement.R | 7 +-
 R/experiments.R | 139 +---
 R/external_locations.R | 22 +-
 R/files.R | 38 +-
 R/functions.R | 23 +-
 R/git_credentials.R | 20 +-
 R/global_init_scripts.R | 20 +-
 R/grants.R | 13 +-
 R/groups.R | 26 +-
 R/instance_pools.R | 40 +-
 R/instance_profiles.R | 15 +-
 R/ip_access_lists.R | 25 +-
 R/jobs.R | 533 ++++++------
 R/lakehouse_monitors.R | 38 +-
 R/lakeview.R | 28 +-
 R/libraries.R | 15 +-
 R/metastores.R | 39 +-
 R/model_registry.R | 173 +---
 R/model_versions.R | 23 +-
 R/online_tables.R | 12 +-
 R/permission_migration.R | 3 -
 R/permissions.R | 18 +-
 R/pipelines.R | 171 ++--
 R/policy_families.R | 7 +-
 R/providers.R | 27 +-
 R/queries.R | 26 +-
 R/query_history.R | 2 -
 R/query_visualizations.R | 13 +-
 R/recipient_activation.R | 8 +-
 R/recipients.R | 32 +-
 R/registered_models.R | 32 +-
 R/repos.R | 42 +-
 R/restrict_workspace_admins.R | 11 +-
 R/schemas.R | 23 +-
 R/secrets.R | 50 +-
 R/service_principals.R | 26 +-
 R/serving_endpoints.R | 229 +++---
 R/shares.R | 30 +-
 R/statement_execution.R | 18 +-
 R/storage_credentials.R | 26 +-
 R/system_schemas.R | 13 +-
 R/table_constraints.R | 8 +-
 R/tables.R | 28 +-
 R/token_management.R | 29 +-
 R/tokens.R | 9 +-
 R/users.R | 38 +-
 R/vector_search_endpoints.R | 130 +--
 R/vector_search_indexes.R | 38 +-
 R/volumes.R | 23 +-
 R/warehouses.R | 375 +++++----
 R/workspace.R | 48 +-
 R/workspace_bindings.R | 18 +-
 R/workspace_conf.R | 7 +-
 README.md | 6 +-
 man/clustersCreate.Rd | 17 +-
 man/clustersCreateAndWait.Rd | 119 +++
 man/clustersDelete.Rd | 12 +-
 man/clustersDeleteAndWait.Rd | 34 +
 man/clustersEdit.Rd | 17 +-
 man/clustersEditAndWait.Rd | 126 +++
 man/clustersResize.Rd | 19 +-
 man/clustersResizeAndWait.Rd | 38 +
 man/clustersRestart.Rd | 18 +-
 man/clustersRestartAndWait.Rd | 35 +
 man/clustersStart.Rd | 14 +-
 man/clustersStartAndWait.Rd | 34 +
 man/commandExecutionCancel.Rd | 15 +-
 man/commandExecutionCancelAndWait.Rd | 39 +
 man/commandExecutionCreate.Rd | 19 +-
 man/commandExecutionCreateAndWait.Rd | 36 +
 man/commandExecutionExecute.Rd | 17 +-
 man/commandExecutionExecuteAndWait.Rd | 44 +
 man/jobsCancelRun.Rd | 12 +-
 man/jobsCancelRunAndWait.Rd | 27 +
 man/jobsGetRun.Rd | 14 +-
 man/jobsGetRunAndWait.Rd | 37 +
 man/jobsRepairRun.Rd | 14 +-
 man/jobsRepairRunAndWait.Rd | 72 ++
 man/jobsRunNow.Rd | 14 +-
 man/jobsRunNowAndWait.Rd | 64 ++
 man/jobsSubmit.Rd | 14 +-
 man/jobsSubmitAndWait.Rd | 64 ++
 man/pipelinesStop.Rd | 12 +-
 man/pipelinesStopAndWait.Rd | 32 +
 man/servingEndpointsCreate.Rd | 19 +-
 man/servingEndpointsCreateAndWait.Rd | 37 +
 man/servingEndpointsUpdateConfig.Rd | 14 +-
 man/servingEndpointsUpdateConfigAndWait.Rd | 46
++ man/vectorSearchEndpointsCreateEndpoint.Rd | 18 +- ...torSearchEndpointsCreateEndpointAndWait.Rd | 34 + man/warehousesCreate.Rd | 14 +- man/warehousesCreateAndWait.Rd | 67 ++ man/warehousesEdit.Rd | 14 +- man/warehousesEditAndWait.Rd | 70 ++ man/warehousesStart.Rd | 12 +- man/warehousesStartAndWait.Rd | 26 + man/warehousesStop.Rd | 12 +- man/warehousesStopAndWait.Rd | 26 + 119 files changed, 2893 insertions(+), 2699 deletions(-) create mode 100644 .github/workflows/push.yaml create mode 100644 man/clustersCreateAndWait.Rd create mode 100644 man/clustersDeleteAndWait.Rd create mode 100644 man/clustersEditAndWait.Rd create mode 100644 man/clustersResizeAndWait.Rd create mode 100644 man/clustersRestartAndWait.Rd create mode 100644 man/clustersStartAndWait.Rd create mode 100644 man/commandExecutionCancelAndWait.Rd create mode 100644 man/commandExecutionCreateAndWait.Rd create mode 100644 man/commandExecutionExecuteAndWait.Rd create mode 100644 man/jobsCancelRunAndWait.Rd create mode 100644 man/jobsGetRunAndWait.Rd create mode 100644 man/jobsRepairRunAndWait.Rd create mode 100644 man/jobsRunNowAndWait.Rd create mode 100644 man/jobsSubmitAndWait.Rd create mode 100644 man/pipelinesStopAndWait.Rd create mode 100644 man/servingEndpointsCreateAndWait.Rd create mode 100644 man/servingEndpointsUpdateConfigAndWait.Rd create mode 100644 man/vectorSearchEndpointsCreateEndpointAndWait.Rd create mode 100644 man/warehousesCreateAndWait.Rd create mode 100644 man/warehousesEditAndWait.Rd create mode 100644 man/warehousesStartAndWait.Rd create mode 100644 man/warehousesStopAndWait.Rd diff --git a/.codegen/service.R.tmpl b/.codegen/service.R.tmpl index 2f34d39c..b3ee9f4d 100644 --- a/.codegen/service.R.tmpl +++ b/.codegen/service.R.tmpl @@ -5,8 +5,25 @@ NULL {{range .Methods}}{{.Comment "#' " 80}} #' @param client Required. Instance of DatabricksClient() +{{if .Request}}#'{{range .Request.Fields}} +#' @param {{.SnakeName}} {{if .Required}}Required. {{end}}{{with .Summary}}{{.}}{{else}}This field has no description yet.{{end}}{{end}} +{{end}}{{if .Pagination}}#' +#' @return `data.frame` with all of the response pages. +{{end}}#' +#' @rdname {{.Service.CamelName}}{{.PascalName}} +#' @export +{{- $hasRequiredFields := and .Request (gt (len .Request.RequiredFields) 0) }} +{{.Service.CamelName}}{{.PascalName}} <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}} + {{- range .Request.NonRequiredFields}}, {{.SnakeName}}=NULL{{end}} + {{- end}}) { + {{- template "method-serialize" .}} + {{template "method-call" .}} +} +{{end}} +{{- range .Methods}}{{if and .Wait (not .IsCrudRead)}}{{.Comment "#' " 80}} +#' @param client Required. Instance of DatabricksClient() {{if .Request}} -{{if and .Wait (not .IsCrudRead)}}#' +#' #' @description #' This is a long-running operation, which blocks until {{.Service.TitleName}} on Databricks reach #' {{range $i, $s := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state with the timeout of {{.Wait.Timeout}} minutes, that you can change via `timeout` parameter. @@ -14,23 +31,19 @@ NULL #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. -{{end}} -{{if .Request.RequiredFields}}#' -{{end}}#'{{range .Request.Fields}} +#'{{range .Request.Fields}} #' @param {{.SnakeName}} {{if .Required}}Required. 
{{end}}{{with .Summary}}{{.}}{{else}}This field has no description yet.{{end}}{{end}} -{{end}}{{if .Pagination}}#' -#' @return `data.frame` with all of the response pages. {{end}}#' -#' @rdname {{.Service.CamelName}}{{.PascalName}} +#' @rdname {{.Service.CamelName}}{{.PascalName}}AndWait #' @export {{- $hasRequiredFields := and .Request (gt (len .Request.RequiredFields) 0) }} -{{.Service.CamelName}}{{.PascalName}} <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}} +{{.Service.CamelName}}{{.PascalName}}AndWait <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}} {{- range .Request.NonRequiredFields}}, {{.SnakeName}}=NULL{{end}} - {{- end}}{{if and .Wait (not .IsCrudRead)}}, timeout={{.Wait.Timeout}}, callback=cli_reporter{{end}}) { + {{- end}}, timeout={{.Wait.Timeout}}, callback=cli_reporter) { {{- template "method-serialize" .}} - {{template "method-call" .}} + {{template "method-call-retried" .}} } - +{{end}} {{end}} {{- define "method-serialize" -}} @@ -44,7 +57,6 @@ NULL {{- define "method-call" -}} {{if .Pagination -}}{{template "method-call-paginated" .}} - {{- else if and .Wait (not .IsCrudRead) -}}{{template "method-call-retried" .}} {{- else}}{{template "method-call-default" .}}{{end}} {{- end -}} diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml new file mode 100644 index 00000000..f2487985 --- /dev/null +++ b/.github/workflows/push.yaml @@ -0,0 +1,36 @@ +name: build + +on: + pull_request: + types: [opened, synchronize] + merge_group: + types: [checks_requested] + push: + # Always run on push to main. The build cache can only be reused + # if it was saved by a run from the repository's default branch. + # The run result will be identical to that from the merge queue + # because the commit is identical, yet we need to perform it to + # seed the build cache. 
+ branches: + - main + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-r@v2 + with: + r-version: release + use-public-rspm: true + + - name: Install dependencies + run: | + Rscript -e "if (!require(devtools)) install.packages('devtools', repos = 'https://cran.rstudio.com')" + Rscript -e "devtools::install_dev_deps('.')" + + - name: Run tests + run: | + Rscript -e "devtools::test()" diff --git a/NAMESPACE b/NAMESPACE index 4de79765..a9b177fe 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -40,8 +40,11 @@ export(clusterPoliciesSetPermissions) export(clusterPoliciesUpdatePermissions) export(clustersChangeOwner) export(clustersCreate) +export(clustersCreateAndWait) export(clustersDelete) +export(clustersDeleteAndWait) export(clustersEdit) +export(clustersEditAndWait) export(clustersEvents) export(clustersGet) export(clustersGetPermissionLevels) @@ -52,18 +55,24 @@ export(clustersListZones) export(clustersPermanentDelete) export(clustersPin) export(clustersResize) +export(clustersResizeAndWait) export(clustersRestart) +export(clustersRestartAndWait) export(clustersSetPermissions) export(clustersSparkVersions) export(clustersStart) +export(clustersStartAndWait) export(clustersUnpin) export(clustersUpdatePermissions) export(commandExecutionCancel) +export(commandExecutionCancelAndWait) export(commandExecutionCommandStatus) export(commandExecutionContextStatus) export(commandExecutionCreate) +export(commandExecutionCreateAndWait) export(commandExecutionDestroy) export(commandExecutionExecute) +export(commandExecutionExecuteAndWait) export(connectionsCreate) export(connectionsDelete) export(connectionsGet) @@ -189,6 +198,7 @@ export(ipAccessListsReplace) export(ipAccessListsUpdate) export(jobsCancelAllRuns) export(jobsCancelRun) +export(jobsCancelRunAndWait) export(jobsCreate) export(jobsDelete) export(jobsDeleteRun) @@ -197,14 +207,18 @@ export(jobsGet) export(jobsGetPermissionLevels) export(jobsGetPermissions) export(jobsGetRun) +export(jobsGetRunAndWait) export(jobsGetRunOutput) export(jobsList) export(jobsListRuns) export(jobsRepairRun) +export(jobsRepairRunAndWait) export(jobsReset) export(jobsRunNow) +export(jobsRunNowAndWait) export(jobsSetPermissions) export(jobsSubmit) +export(jobsSubmitAndWait) export(jobsUpdate) export(jobsUpdatePermissions) export(lakehouseMonitorsCancelRefresh) @@ -296,6 +310,7 @@ export(pipelinesListUpdates) export(pipelinesSetPermissions) export(pipelinesStartUpdate) export(pipelinesStop) +export(pipelinesStopAndWait) export(pipelinesUpdate) export(pipelinesUpdatePermissions) export(policyFamiliesGet) @@ -368,6 +383,7 @@ export(servicePrincipalsPatch) export(servicePrincipalsUpdate) export(servingEndpointsBuildLogs) export(servingEndpointsCreate) +export(servingEndpointsCreateAndWait) export(servingEndpointsDelete) export(servingEndpointsExportMetrics) export(servingEndpointsGet) @@ -380,6 +396,7 @@ export(servingEndpointsPut) export(servingEndpointsQuery) export(servingEndpointsSetPermissions) export(servingEndpointsUpdateConfig) +export(servingEndpointsUpdateConfigAndWait) export(servingEndpointsUpdatePermissions) export(sharesCreate) export(sharesDelete) @@ -431,6 +448,7 @@ export(usersSetPermissions) export(usersUpdate) export(usersUpdatePermissions) export(vectorSearchEndpointsCreateEndpoint) +export(vectorSearchEndpointsCreateEndpointAndWait) export(vectorSearchEndpointsDeleteEndpoint) export(vectorSearchEndpointsGetEndpoint) export(vectorSearchEndpointsListEndpoints) @@ -448,8 +466,10 @@ 
export(volumesList) export(volumesRead) export(volumesUpdate) export(warehousesCreate) +export(warehousesCreateAndWait) export(warehousesDelete) export(warehousesEdit) +export(warehousesEditAndWait) export(warehousesGet) export(warehousesGetPermissionLevels) export(warehousesGetPermissions) @@ -458,7 +478,9 @@ export(warehousesList) export(warehousesSetPermissions) export(warehousesSetWorkspaceWarehouseConfig) export(warehousesStart) +export(warehousesStartAndWait) export(warehousesStop) +export(warehousesStopAndWait) export(warehousesUpdatePermissions) export(workspaceBindingsGet) export(workspaceBindingsGetBindings) diff --git a/R/account_access_control_proxy.R b/R/account_access_control_proxy.R index 686ef62d..8bff196c 100755 --- a/R/account_access_control_proxy.R +++ b/R/account_access_control_proxy.R @@ -9,9 +9,6 @@ NULL #' is grantable if the rule set on the resource can contain an access rule of #' the role. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param resource Required. The resource name for which assignable roles will be listed. #' @@ -22,16 +19,12 @@ accountAccessControlProxyGetAssignableRolesForResource <- function(client, resou client$do("GET", "/api/2.0/preview/accounts/access-control/assignable-roles", query = query) } - #' Get a rule set. #' #' Get a rule set by its name. A rule set is always attached to a resource and #' contains a list of access rules on the said resource. Currently only a #' default rule set for each resource is supported. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param etag Required. Etag used for versioning. #' @param name Required. The ruleset name associated with the request. @@ -42,16 +35,12 @@ accountAccessControlProxyGetRuleSet <- function(client, name, etag) { query <- list(etag = etag, name = name) client$do("GET", "/api/2.0/preview/accounts/access-control/rule-sets", query = query) } - #' Update a rule set. #' #' Replace the rules of a rule set. First, use a GET rule set request to read #' the current version of the rule set before modifying it. This pattern helps #' prevent conflicts between concurrent updates. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the rule set. #' @param rule_set Required. This field has no description yet. @@ -63,3 +52,5 @@ accountAccessControlProxyUpdateRuleSet <- function(client, name, rule_set) { client$do("PUT", "/api/2.0/preview/accounts/access-control/rule-sets", body = body) } + + diff --git a/R/alerts.R b/R/alerts.R index c669829f..ce5723ab 100755 --- a/R/alerts.R +++ b/R/alerts.R @@ -9,9 +9,6 @@ NULL #' a query, evaluates a condition of its result, and notifies users or #' notification destinations if the condition was met. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the alert. #' @param options Required. Alert configuration options. @@ -26,16 +23,12 @@ alertsCreate <- function(client, name, options, query_id, parent = NULL, rearm = rearm = rearm) client$do("POST", "/api/2.0/preview/sql/alerts", body = body) } - #' Delete an alert. #' #' Deletes an alert. Deleted alerts are no longer accessible and cannot be #' restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to #' the trash. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param alert_id Required. This field has no description yet. 
#' @@ -45,14 +38,10 @@ alertsDelete <- function(client, alert_id) { client$do("DELETE", paste("/api/2.0/preview/sql/alerts/", alert_id, sep = "")) } - #' Get an alert. #' #' Gets an alert. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param alert_id Required. This field has no description yet. #' @@ -62,7 +51,6 @@ alertsGet <- function(client, alert_id) { client$do("GET", paste("/api/2.0/preview/sql/alerts/", alert_id, sep = "")) } - #' Get alerts. #' #' Gets a list of alerts. @@ -73,14 +61,10 @@ alertsGet <- function(client, alert_id) { alertsList <- function(client) { client$do("GET", "/api/2.0/preview/sql/alerts") } - #' Update an alert. #' #' Updates an alert. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param alert_id Required. This field has no description yet. #' @param name Required. Name of the alert. @@ -95,3 +79,7 @@ alertsUpdate <- function(client, alert_id, name, options, query_id, rearm = NULL client$do("PUT", paste("/api/2.0/preview/sql/alerts/", alert_id, sep = ""), body = body) } + + + + diff --git a/R/apps.R b/R/apps.R index 56fc90fc..cfa8671c 100755 --- a/R/apps.R +++ b/R/apps.R @@ -7,9 +7,6 @@ NULL #' #' Creates and deploys an application. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param manifest Required. Manifest that specifies the application requirements. #' @param resources Information passed at app deployment time to fulfill app dependencies. @@ -20,14 +17,10 @@ appsCreate <- function(client, manifest, resources = NULL) { body <- list(manifest = manifest, resources = resources) client$do("POST", "/api/2.0/preview/apps/deployments", body = body) } - #' Delete an application. #' #' Delete an application definition #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of an application. #' @@ -37,14 +30,10 @@ appsDeleteApp <- function(client, name) { client$do("DELETE", paste("/api/2.0/preview/apps/instances/", name, sep = "")) } - #' Get definition for an application. #' #' Get an application definition #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of an application. #' @@ -54,14 +43,10 @@ appsGetApp <- function(client, name) { client$do("GET", paste("/api/2.0/preview/apps/instances/", name, sep = "")) } - #' Get deployment status for an application. #' #' Get deployment status for an application #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param deployment_id Required. The deployment id for an application. #' @param include_app_log Boolean flag to include application logs. @@ -73,7 +58,6 @@ appsGetAppDeploymentStatus <- function(client, deployment_id, include_app_log = client$do("GET", paste("/api/2.0/preview/apps/deployments/", deployment_id, sep = ""), query = query) } - #' List all applications. #' #' List all available applications @@ -84,14 +68,10 @@ appsGetAppDeploymentStatus <- function(client, deployment_id, include_app_log = appsGetApps <- function(client) { client$do("GET", "/api/2.0/preview/apps/instances") } - #' Get deployment events for an application. #' #' Get deployment events for an application #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of an application. 
#' @@ -102,3 +82,8 @@ appsGetEvents <- function(client, name) { client$do("GET", paste("/api/2.0/preview/apps/", name, "/events", , sep = "")) } + + + + + diff --git a/R/artifact_allowlists.R b/R/artifact_allowlists.R index 75de9822..401ffde1 100755 --- a/R/artifact_allowlists.R +++ b/R/artifact_allowlists.R @@ -8,9 +8,6 @@ NULL #' Get the artifact allowlist of a certain artifact type. The caller must be a #' metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param artifact_type Required. The artifact type of the allowlist. #' @@ -21,16 +18,12 @@ artifactAllowlistsGet <- function(client, artifact_type) { client$do("GET", paste("/api/2.1/unity-catalog/artifact-allowlists/", artifact_type, sep = "")) } - #' Set an artifact allowlist. #' #' Set the artifact allowlist of a certain artifact type. The whole artifact #' allowlist is replaced with the new allowlist. The caller must be a metastore #' admin or have the **MANAGE ALLOWLIST** privilege on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param artifact_matchers Required. A list of allowed artifact match patterns. #' @param artifact_type Required. The artifact type of the allowlist. @@ -43,3 +36,4 @@ artifactAllowlistsUpdate <- function(client, artifact_type, artifact_matchers) { sep = ""), body = body) } + diff --git a/R/automatic_cluster_update.R b/R/automatic_cluster_update.R index 8f65b93b..d0df1ae2 100755 --- a/R/automatic_cluster_update.R +++ b/R/automatic_cluster_update.R @@ -7,8 +7,6 @@ NULL #' #' Gets the automatic cluster update setting. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. #' @@ -19,7 +17,6 @@ automaticClusterUpdateGet <- function(client, etag = NULL) { client$do("GET", "/api/2.0/settings/types/automatic_cluster_update/names/default", query = query) } - #' Update the automatic cluster update setting. #' #' Updates the automatic cluster update setting for the workspace. A fresh etag @@ -28,9 +25,6 @@ automaticClusterUpdateGet <- function(client, etag = NULL) { #' If the setting is updated concurrently, `PATCH` fails with 409 and the #' request must be retried by using the fresh etag in the 409 response. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param allow_missing Required. This should always be set to true for Settings API. #' @param field_mask Required. Field mask is required to be passed into the PATCH request. @@ -44,3 +38,4 @@ automaticClusterUpdateUpdate <- function(client, allow_missing, setting, field_m body = body) } + diff --git a/R/catalogs.R b/R/catalogs.R index c330ed95..c0fe0f3b 100755 --- a/R/catalogs.R +++ b/R/catalogs.R @@ -8,9 +8,6 @@ NULL #' Creates a new catalog instance in the parent metastore if the caller is a #' metastore admin or has the **CREATE_CATALOG** privilege. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param connection_name The name of the connection to an external data source. @@ -30,15 +27,11 @@ catalogsCreate <- function(client, name, comment = NULL, connection_name = NULL, share_name = share_name, storage_root = storage_root) client$do("POST", "/api/2.1/unity-catalog/catalogs", body = body) } - #' Delete a catalog. #' #' Deletes the catalog that matches the supplied name. The caller must be a #' metastore admin or the owner of the catalog. #' @param client Required. 
Instance of DatabricksClient() - - -#' #' #' @param force Force deletion even if the catalog is not empty. #' @param name Required. The name of the catalog. @@ -50,16 +43,12 @@ catalogsDelete <- function(client, name, force = NULL) { client$do("DELETE", paste("/api/2.1/unity-catalog/catalogs/", name, sep = ""), query = query) } - #' Get a catalog. #' #' Gets the specified catalog in a metastore. The caller must be a metastore #' admin, the owner of the catalog, or a user that has the **USE_CATALOG** #' privilege set for their account. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param include_browse Whether to include catalogs in the response for which the principal can only access selective metadata for. #' @param name Required. The name of the catalog. @@ -70,7 +59,6 @@ catalogsGet <- function(client, name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/catalogs/", name, sep = ""), query = query) } - #' List catalogs. #' #' Gets an array of catalogs in the metastore. If the caller is the metastore @@ -79,8 +67,6 @@ catalogsGet <- function(client, name, include_browse = NULL) { #' retrieved. There is no guarantee of a specific ordering of the elements in #' the array. #' @param client Required. Instance of DatabricksClient() - - #' #' @param include_browse Whether to include catalogs in the response for which the principal can only access selective metadata for. #' @@ -95,16 +81,12 @@ catalogsList <- function(client, include_browse = NULL) { return(json$catalogs) } - #' Update a catalog. #' #' Updates the catalog that matches the supplied name. The caller must be either #' the owner of the catalog, or a metastore admin (when changing the owner field #' of the catalog). #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param enable_predictive_optimization Whether predictive optimization should be enabled for this object and objects under it. @@ -124,3 +106,7 @@ catalogsUpdate <- function(client, name, comment = NULL, enable_predictive_optim body = body) } + + + + diff --git a/R/clean_rooms.R b/R/clean_rooms.R index ba491a13..976343f3 100755 --- a/R/clean_rooms.R +++ b/R/clean_rooms.R @@ -8,9 +8,6 @@ NULL #' Creates a new clean room with specified colaborators. The caller must be a #' metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param name Required. Name of the clean room. @@ -22,15 +19,11 @@ cleanRoomsCreate <- function(client, name, remote_detailed_info, comment = NULL) body <- list(comment = comment, name = name, remote_detailed_info = remote_detailed_info) client$do("POST", "/api/2.1/unity-catalog/clean-rooms", body = body) } - #' Delete a clean room. #' #' Deletes a data object clean room from the metastore. The caller must be an #' owner of the clean room. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the clean room. #' @@ -40,15 +33,11 @@ cleanRoomsDelete <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/clean-rooms/", name, sep = "")) } - #' Get a clean room. #' #' Gets a data object clean room from the metastore. The caller must be a #' metastore admin or the owner of the clean room. #' @param client Required. 
Instance of DatabricksClient() - - -#' #' #' @param include_remote_details Whether to include remote details (central) on the clean room. #' @param name Required. The name of the clean room. @@ -60,15 +49,12 @@ cleanRoomsGet <- function(client, name, include_remote_details = NULL) { client$do("GET", paste("/api/2.1/unity-catalog/clean-rooms/", name, sep = ""), query = query) } - #' List clean rooms. #' #' Gets an array of data object clean rooms from the metastore. The caller must #' be a metastore admin or the owner of the clean room. There is no guarantee of #' a specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - #' #' @param max_results Maximum number of clean rooms to return. #' @param page_token Opaque pagination token to go to next page based on previous query. @@ -96,7 +82,6 @@ cleanRoomsList <- function(client, max_results = NULL, page_token = NULL) { return(results) } - #' Update a clean room. #' #' Updates the clean room with the changes and data objects in the request. The @@ -115,9 +100,6 @@ cleanRoomsList <- function(client, max_results = NULL, page_token = NULL) { #' #' Table removals through **update** do not require additional privileges. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_updates Array of shared data object updates. #' @param comment User-provided free-form text description. @@ -133,3 +115,7 @@ cleanRoomsUpdate <- function(client, name, catalog_updates = NULL, comment = NUL body = body) } + + + + diff --git a/R/cluster_policies.R b/R/cluster_policies.R index ea5d82fb..9f115daa 100755 --- a/R/cluster_policies.R +++ b/R/cluster_policies.R @@ -7,9 +7,6 @@ NULL #' #' Creates a new policy with prescribed settings. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param definition Policy definition document expressed in [Databricks Cluster Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html). #' @param description Additional human-readable description of the cluster policy. @@ -29,15 +26,11 @@ clusterPoliciesCreate <- function(client, name, definition = NULL, description = policy_family_id = policy_family_id) client$do("POST", "/api/2.0/policies/clusters/create", body = body) } - #' Delete a cluster policy. #' #' Delete a policy for a cluster. Clusters governed by this policy can still #' run, but cannot be edited. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param policy_id Required. The ID of the policy to delete. #' @@ -47,15 +40,11 @@ clusterPoliciesDelete <- function(client, policy_id) { body <- list(policy_id = policy_id) client$do("POST", "/api/2.0/policies/clusters/delete", body = body) } - #' Update a cluster policy. #' #' Update an existing policy for cluster. This operation may make some clusters #' governed by the previous policy invalid. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param definition Policy definition document expressed in [Databricks Cluster Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html). #' @param description Additional human-readable description of the cluster policy. @@ -76,15 +65,11 @@ clusterPoliciesEdit <- function(client, policy_id, name, definition = NULL, desc policy_family_id = policy_family_id, policy_id = policy_id) client$do("POST", "/api/2.0/policies/clusters/edit", body = body) } - #' Get a cluster policy. 
#' #' Get a cluster policy entity. Creation and editing is available to admins #' only. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param policy_id Required. Canonical unique identifier for the cluster policy. #' @@ -94,14 +79,10 @@ clusterPoliciesGet <- function(client, policy_id) { query <- list(policy_id = policy_id) client$do("GET", "/api/2.0/policies/clusters/get", query = query) } - #' Get cluster policy permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions. #' @@ -112,15 +93,11 @@ clusterPoliciesGetPermissionLevels <- function(client, cluster_policy_id) { client$do("GET", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id, "/permissionLevels", , sep = "")) } - #' Get cluster policy permissions. #' #' Gets the permissions of a cluster policy. Cluster policies can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions. #' @@ -131,13 +108,10 @@ clusterPoliciesGetPermissions <- function(client, cluster_policy_id) { client$do("GET", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id, sep = "")) } - #' List cluster policies. #' #' Returns a list of policies accessible by the requesting user. #' @param client Required. Instance of DatabricksClient() - - #' #' @param sort_column The cluster policy attribute to sort by. #' @param sort_order The order in which the policies get listed. @@ -153,15 +127,11 @@ clusterPoliciesList <- function(client, sort_column = NULL, sort_order = NULL) { return(json$policies) } - #' Set cluster policy permissions. #' #' Sets permissions on a cluster policy. Cluster policies can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions. @@ -173,15 +143,11 @@ clusterPoliciesSetPermissions <- function(client, cluster_policy_id, access_cont client$do("PUT", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id, sep = ""), body = body) } - #' Update cluster policy permissions. #' #' Updates the permissions on a cluster policy. Cluster policies can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions. @@ -194,3 +160,11 @@ clusterPoliciesUpdatePermissions <- function(client, cluster_policy_id, access_c sep = ""), body = body) } + + + + + + + + diff --git a/R/clusters.R b/R/clusters.R index 1a6c7ec1..c4bee265 100755 --- a/R/clusters.R +++ b/R/clusters.R @@ -9,9 +9,6 @@ NULL #' terminated to perform this operation. The service principal application ID #' can be supplied as an argument to `owner_username`. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. . #' @param owner_username Required. New owner of the cluster_id after this RPC. 
@@ -22,7 +19,6 @@ clustersChangeOwner <- function(client, cluster_id, owner_username) { body <- list(cluster_id = cluster_id, owner_username = owner_username) client$do("POST", "/api/2.0/clusters/change-owner", body = body) } - #' Create new cluster. #' #' Creates a new Spark cluster. This method will acquire new instances from the @@ -34,17 +30,6 @@ clustersChangeOwner <- function(client, cluster_id, owner_username) { #' creation will succeed. Otherwise the cluster will terminate with an #' informative error message. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Clusters on Databricks reach -#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Clusters is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param apply_policy_default_values This field has no description yet. #' @param autoscale Parameters needed in order to automatically scale clusters up and down based on load. @@ -84,8 +69,7 @@ clustersCreate <- function(client, spark_version, apply_policy_default_values = driver_node_type_id = NULL, enable_elastic_disk = NULL, enable_local_disk_encryption = NULL, gcp_attributes = NULL, init_scripts = NULL, instance_pool_id = NULL, node_type_id = NULL, num_workers = NULL, policy_id = NULL, runtime_engine = NULL, single_user_name = NULL, - spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, workload_type = NULL, - timeout = 20, callback = cli_reporter) { + spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, workload_type = NULL) { body <- list(apply_policy_default_values = apply_policy_default_values, autoscale = autoscale, autotermination_minutes = autotermination_minutes, aws_attributes = aws_attributes, azure_attributes = azure_attributes, cluster_log_conf = cluster_log_conf, @@ -97,44 +81,8 @@ clustersCreate <- function(client, spark_version, apply_policy_default_values = num_workers = num_workers, policy_id = policy_id, runtime_engine = runtime_engine, single_user_name = single_user_name, spark_conf = spark_conf, spark_env_vars = spark_env_vars, spark_version = spark_version, ssh_public_keys = ssh_public_keys, workload_type = workload_type) - op_response <- client$do("POST", "/api/2.0/clusters/create", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("RUNNING", c()) - failure_states <- c("ERROR", "TERMINATED", c()) - status_message <- "polling..." 
- attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- clustersGet(client, cluster_id = op_response$cluster_id) - status <- poll$state - status_message <- poll$state_message - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::clustersGet(cluster_id=", op_response$cluster_id, - ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/clusters/create", body = body) } - #' Terminate cluster. #' #' Terminates the Spark cluster with the specified ID. The cluster is removed @@ -142,61 +90,15 @@ clustersCreate <- function(client, spark_version, apply_policy_default_values = #' `TERMINATED` state. If the cluster is already in a `TERMINATING` or #' `TERMINATED` state, nothing will happen. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Clusters on Databricks reach -#' TERMINATED state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Clusters is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param cluster_id Required. The cluster to be terminated. #' #' @rdname clustersDelete #' @export -clustersDelete <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { +clustersDelete <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) - op_response <- client$do("POST", "/api/2.0/clusters/delete", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("TERMINATED", c()) - failure_states <- c("ERROR", c()) - status_message <- "polling..." 
- attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- clustersGet(client, cluster_id = cluster_id) - status <- poll$state - status_message <- poll$state_message - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach TERMINATED, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/clusters/delete", body = body) } - #' Update cluster configuration. #' #' Updates the configuration of a cluster to match the provided attributes and @@ -212,17 +114,6 @@ clustersDelete <- function(client, cluster_id, timeout = 20, callback = cli_repo #' #' Clusters created by the Databricks Jobs service cannot be edited. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Clusters on Databricks reach -#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Clusters is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param apply_policy_default_values This field has no description yet. #' @param autoscale Parameters needed in order to automatically scale clusters up and down based on load. 
@@ -263,8 +154,7 @@ clustersEdit <- function(client, cluster_id, spark_version, apply_policy_default driver_node_type_id = NULL, enable_elastic_disk = NULL, enable_local_disk_encryption = NULL, gcp_attributes = NULL, init_scripts = NULL, instance_pool_id = NULL, node_type_id = NULL, num_workers = NULL, policy_id = NULL, runtime_engine = NULL, single_user_name = NULL, - spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, workload_type = NULL, - timeout = 20, callback = cli_reporter) { + spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, workload_type = NULL) { body <- list(apply_policy_default_values = apply_policy_default_values, autoscale = autoscale, autotermination_minutes = autotermination_minutes, aws_attributes = aws_attributes, azure_attributes = azure_attributes, cluster_id = cluster_id, cluster_log_conf = cluster_log_conf, @@ -276,52 +166,14 @@ clustersEdit <- function(client, cluster_id, spark_version, apply_policy_default num_workers = num_workers, policy_id = policy_id, runtime_engine = runtime_engine, single_user_name = single_user_name, spark_conf = spark_conf, spark_env_vars = spark_env_vars, spark_version = spark_version, ssh_public_keys = ssh_public_keys, workload_type = workload_type) - op_response <- client$do("POST", "/api/2.0/clusters/edit", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("RUNNING", c()) - failure_states <- c("ERROR", "TERMINATED", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- clustersGet(client, cluster_id = cluster_id) - status <- poll$state - status_message <- poll$state_message - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/clusters/edit", body = body) } - #' List cluster activity events. #' #' Retrieves a list of events about the activity of a cluster. This API is #' paginated. If there are more events to read, the response includes all the #' nparameters necessary to request the next page of events. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. The ID of the cluster to retrieve events about. #' @param end_time The end time in epoch milliseconds. @@ -356,15 +208,11 @@ clustersEvents <- function(client, cluster_id, end_time = NULL, event_types = NU return(results) } - #' Get cluster info. #' #' Retrieves the information for a cluster given its identifier. Clusters can be #' described while they are running, or up to 60 days after they are terminated. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. The cluster about which to retrieve information. 
#' @@ -374,14 +222,10 @@ clustersGet <- function(client, cluster_id) { query <- list(cluster_id = cluster_id) client$do("GET", "/api/2.0/clusters/get", query = query) } - #' Get cluster permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. The cluster for which to get or manage permissions. #' @@ -392,15 +236,11 @@ clustersGetPermissionLevels <- function(client, cluster_id) { client$do("GET", paste("/api/2.0/permissions/clusters/", cluster_id, "/permissionLevels", , sep = "")) } - #' Get cluster permissions. #' #' Gets the permissions of a cluster. Clusters can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. The cluster for which to get or manage permissions. #' @@ -410,7 +250,6 @@ clustersGetPermissions <- function(client, cluster_id) { client$do("GET", paste("/api/2.0/permissions/clusters/", cluster_id, sep = "")) } - #' List all clusters. #' #' Return information about all pinned clusters, active clusters, up to 200 of @@ -423,8 +262,6 @@ clustersGetPermissions <- function(client, cluster_id) { #' clusters, all 45 terminated all-purpose clusters, and the 30 most recently #' terminated job clusters. #' @param client Required. Instance of DatabricksClient() - - #' #' @param can_use_client Filter clusters based on what type of client it can be used for. #' @@ -439,7 +276,6 @@ clustersList <- function(client, can_use_client = NULL) { return(json$clusters) } - #' List node types. #' #' Returns a list of supported Spark node types. These node types can be used to @@ -451,7 +287,6 @@ clustersList <- function(client, can_use_client = NULL) { clustersListNodeTypes <- function(client) { client$do("GET", "/api/2.0/clusters/list-node-types") } - #' List availability zones. #' #' Returns a list of availability zones where clusters can be created in (For @@ -463,7 +298,6 @@ clustersListNodeTypes <- function(client) { clustersListZones <- function(client) { client$do("GET", "/api/2.0/clusters/list-zones") } - #' Permanently delete cluster. #' #' Permanently deletes a Spark cluster. This cluster is terminated and resources @@ -473,9 +307,6 @@ clustersListZones <- function(client) { #' cluster list, and API users can no longer perform any action on permanently #' deleted clusters. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. The cluster to be deleted. #' @@ -485,16 +316,12 @@ clustersPermanentDelete <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/permanent-delete", body = body) } - #' Pin cluster. #' #' Pinning a cluster ensures that the cluster will always be returned by the #' ListClusters API. Pinning a cluster that is already pinned will have no #' effect. This API can only be called by workspace admins. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. . #' @@ -504,23 +331,11 @@ clustersPin <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/pin", body = body) } - #' Resize cluster. #' #' Resizes a cluster to have a desired number of workers. This will fail unless #' the cluster is in a `RUNNING` state. #' @param client Required. 
Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Clusters on Databricks reach -#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Clusters is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param autoscale Parameters needed in order to automatically scale clusters up and down based on load. #' @param cluster_id Required. The cluster to be resized. @@ -528,116 +343,30 @@ clustersPin <- function(client, cluster_id) { #' #' @rdname clustersResize #' @export -clustersResize <- function(client, cluster_id, autoscale = NULL, num_workers = NULL, - timeout = 20, callback = cli_reporter) { +clustersResize <- function(client, cluster_id, autoscale = NULL, num_workers = NULL) { body <- list(autoscale = autoscale, cluster_id = cluster_id, num_workers = num_workers) - op_response <- client$do("POST", "/api/2.0/clusters/resize", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("RUNNING", c()) - failure_states <- c("ERROR", "TERMINATED", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- clustersGet(client, cluster_id = cluster_id) - status <- poll$state - status_message <- poll$state_message - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/clusters/resize", body = body) } - #' Restart cluster. #' #' Restarts a Spark cluster with the supplied ID. If the cluster is not #' currently in a `RUNNING` state, nothing will happen. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Clusters on Databricks reach -#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Clusters is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param cluster_id Required. The cluster to be started. #' @param restart_user . 
#' #' @rdname clustersRestart #' @export -clustersRestart <- function(client, cluster_id, restart_user = NULL, timeout = 20, - callback = cli_reporter) { +clustersRestart <- function(client, cluster_id, restart_user = NULL) { body <- list(cluster_id = cluster_id, restart_user = restart_user) - op_response <- client$do("POST", "/api/2.0/clusters/restart", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("RUNNING", c()) - failure_states <- c("ERROR", "TERMINATED", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- clustersGet(client, cluster_id = cluster_id) - status <- poll$state - status_message <- poll$state_message - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/clusters/restart", body = body) } - #' Set cluster permissions. #' #' Sets permissions on a cluster. Clusters can inherit permissions from their #' root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param cluster_id Required. The cluster for which to get or manage permissions. @@ -649,7 +378,6 @@ clustersSetPermissions <- function(client, cluster_id, access_control_list = NUL client$do("PUT", paste("/api/2.0/permissions/clusters/", cluster_id, sep = ""), body = body) } - #' List available Spark versions. #' #' Returns the list of available Spark versions. These versions can be used to @@ -661,7 +389,6 @@ clustersSetPermissions <- function(client, cluster_id, access_control_list = NUL clustersSparkVersions <- function(client) { client$do("GET", "/api/2.0/clusters/spark-versions") } - #' Start terminated cluster. #' #' Starts a terminated Spark cluster with the supplied ID. This works similar to @@ -673,6 +400,58 @@ clustersSparkVersions <- function(client) { #' nodes. * If the cluster is not currently in a `TERMINATED` state, nothing #' will happen. * Clusters launched to run a job cannot be started. #' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Required. The cluster to be started. +#' +#' @rdname clustersStart +#' @export +clustersStart <- function(client, cluster_id) { + body <- list(cluster_id = cluster_id) + client$do("POST", "/api/2.0/clusters/start", body = body) +} +#' Unpin cluster. +#' +#' Unpinning a cluster will allow the cluster to eventually be removed from the +#' ListClusters API. Unpinning a cluster that is not pinned will have no effect. +#' This API can only be called by workspace admins. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Required. . 
+#' +#' @rdname clustersUnpin +#' @export +clustersUnpin <- function(client, cluster_id) { + body <- list(cluster_id = cluster_id) + client$do("POST", "/api/2.0/clusters/unpin", body = body) +} +#' Update cluster permissions. +#' +#' Updates the permissions on a cluster. Clusters can inherit permissions from +#' their root object. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param access_control_list This field has no description yet. +#' @param cluster_id Required. The cluster for which to get or manage permissions. +#' +#' @rdname clustersUpdatePermissions +#' @export +clustersUpdatePermissions <- function(client, cluster_id, access_control_list = NULL) { + body <- list(access_control_list = access_control_list) + client$do("PATCH", paste("/api/2.0/permissions/clusters/", cluster_id, sep = ""), + body = body) +} + +#' Create new cluster. +#' +#' Creates a new Spark cluster. This method will acquire new instances from the +#' cloud provider if necessary. Note: Databricks may not be able to acquire some +#' of the requested nodes, due to cloud provider limitations (account limits, +#' spot price, etc.) or transient network issues. +#' +#' If Databricks acquires at least 85% of the requested on-demand nodes, cluster +#' creation will succeed. Otherwise the cluster will terminate with an +#' informative error message. +#' @param client Required. Instance of DatabricksClient() #' #' @description @@ -682,16 +461,234 @@ clustersSparkVersions <- function(client) { #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param apply_policy_default_values This field has no description yet. +#' @param autoscale Parameters needed in order to automatically scale clusters up and down based on load. +#' @param autotermination_minutes Automatically terminates the cluster after it is inactive for this time in minutes. +#' @param aws_attributes Attributes related to clusters running on Amazon Web Services. +#' @param azure_attributes Attributes related to clusters running on Microsoft Azure. +#' @param cluster_log_conf The configuration for delivering spark logs to a long-term storage destination. +#' @param cluster_name Cluster name requested by the user. +#' @param cluster_source Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. +#' @param custom_tags Additional tags for cluster resources. +#' @param data_security_mode Data security mode decides what data governance model to use when accessing data from a cluster. +#' @param docker_image This field has no description yet. +#' @param driver_instance_pool_id The optional ID of the instance pool for the driver of the cluster belongs. +#' @param driver_node_type_id The node type of the Spark driver. +#' @param enable_elastic_disk Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space. +#' @param enable_local_disk_encryption Whether to enable LUKS on cluster VMs' local disks. +#' @param gcp_attributes Attributes related to clusters running on Google Cloud Platform. +#' @param init_scripts The configuration for storing init scripts. +#' @param instance_pool_id The optional ID of the instance pool to which the cluster belongs. 
+#' @param node_type_id This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. +#' @param num_workers Number of worker nodes that this cluster should have. +#' @param policy_id The ID of the cluster policy used to create the cluster if applicable. +#' @param runtime_engine Decides which runtime engine to use, e.g. +#' @param single_user_name Single user name if data_security_mode is `SINGLE_USER`. +#' @param spark_conf An object containing a set of optional, user-specified Spark configuration key-value pairs. +#' @param spark_env_vars An object containing a set of optional, user-specified environment variable key-value pairs. +#' @param spark_version Required. The Spark version of the cluster, e.g. +#' @param ssh_public_keys SSH public key contents that will be added to each Spark node in this cluster. +#' @param workload_type This field has no description yet. +#' +#' @rdname clustersCreateAndWait +#' @export +clustersCreateAndWait <- function(client, spark_version, apply_policy_default_values = NULL, + autoscale = NULL, autotermination_minutes = NULL, aws_attributes = NULL, azure_attributes = NULL, + cluster_log_conf = NULL, cluster_name = NULL, cluster_source = NULL, custom_tags = NULL, + data_security_mode = NULL, docker_image = NULL, driver_instance_pool_id = NULL, + driver_node_type_id = NULL, enable_elastic_disk = NULL, enable_local_disk_encryption = NULL, + gcp_attributes = NULL, init_scripts = NULL, instance_pool_id = NULL, node_type_id = NULL, + num_workers = NULL, policy_id = NULL, runtime_engine = NULL, single_user_name = NULL, + spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, workload_type = NULL, + timeout = 20, callback = cli_reporter) { + body <- list(apply_policy_default_values = apply_policy_default_values, autoscale = autoscale, + autotermination_minutes = autotermination_minutes, aws_attributes = aws_attributes, + azure_attributes = azure_attributes, cluster_log_conf = cluster_log_conf, + cluster_name = cluster_name, cluster_source = cluster_source, custom_tags = custom_tags, + data_security_mode = data_security_mode, docker_image = docker_image, driver_instance_pool_id = driver_instance_pool_id, + driver_node_type_id = driver_node_type_id, enable_elastic_disk = enable_elastic_disk, + enable_local_disk_encryption = enable_local_disk_encryption, gcp_attributes = gcp_attributes, + init_scripts = init_scripts, instance_pool_id = instance_pool_id, node_type_id = node_type_id, + num_workers = num_workers, policy_id = policy_id, runtime_engine = runtime_engine, + single_user_name = single_user_name, spark_conf = spark_conf, spark_env_vars = spark_env_vars, + spark_version = spark_version, ssh_public_keys = ssh_public_keys, workload_type = workload_type) + op_response <- client$do("POST", "/api/2.0/clusters/create", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("RUNNING", c()) + failure_states <- c("ERROR", "TERMINATED", c()) + status_message <- "polling..."
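+  # Poll clustersGet() until the cluster reaches a target or a failure state,
+  # or until the timeout elapses; the sleep grows with each attempt, capped at
+  # 10 seconds, with a small random jitter added on top.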
+ attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- clustersGet(client, cluster_id = op_response$cluster_id) + status <- poll$state + status_message <- poll$state_message + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::clustersGet(cluster_id=", op_response$cluster_id, + ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + +#' Terminate cluster. +#' +#' Terminates the Spark cluster with the specified ID. The cluster is removed +#' asynchronously. Once the termination has completed, the cluster will be in a +#' `TERMINATED` state. If the cluster is already in a `TERMINATING` or +#' `TERMINATED` state, nothing will happen. +#' @param client Required. Instance of DatabricksClient() #' +#' @description +#' This is a long-running operation, which blocks until Clusters on Databricks reach +#' TERMINATED state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Clusters is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. #' -#' @param cluster_id Required. The cluster to be started. +#' @param cluster_id Required. The cluster to be terminated. #' -#' @rdname clustersStart +#' @rdname clustersDeleteAndWait #' @export -clustersStart <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { +clustersDeleteAndWait <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { body <- list(cluster_id = cluster_id) - op_response <- client$do("POST", "/api/2.0/clusters/start", body = body) + op_response <- client$do("POST", "/api/2.0/clusters/delete", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("TERMINATED", c()) + failure_states <- c("ERROR", c()) + status_message <- "polling..." 
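+  # Poll clustersGet() until the cluster reaches TERMINATED or a failure
+  # state, or until the timeout elapses; the sleep grows with each attempt,
+  # capped at 10 seconds, plus a small random jitter.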
+ attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- clustersGet(client, cluster_id = cluster_id) + status <- poll$state + status_message <- poll$state_message + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach TERMINATED, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + +#' Update cluster configuration. +#' +#' Updates the configuration of a cluster to match the provided attributes and +#' size. A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state. +#' +#' If a cluster is updated while in a `RUNNING` state, it will be restarted so +#' that the new attributes can take effect. +#' +#' If a cluster is updated while in a `TERMINATED` state, it will remain +#' `TERMINATED`. The next time it is started using the `clusters/start` API, the +#' new attributes will take effect. Any attempt to update a cluster in any other +#' state will be rejected with an `INVALID_STATE` error code. +#' +#' Clusters created by the Databricks Jobs service cannot be edited. +#' @param client Required. Instance of DatabricksClient() + +#' +#' @description +#' This is a long-running operation, which blocks until Clusters on Databricks reach +#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Clusters is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param apply_policy_default_values This field has no description yet. +#' @param autoscale Parameters needed in order to automatically scale clusters up and down based on load. +#' @param autotermination_minutes Automatically terminates the cluster after it is inactive for this time in minutes. +#' @param aws_attributes Attributes related to clusters running on Amazon Web Services. +#' @param azure_attributes Attributes related to clusters running on Microsoft Azure. +#' @param cluster_id Required. ID of the cluster. +#' @param cluster_log_conf The configuration for delivering spark logs to a long-term storage destination. +#' @param cluster_name Cluster name requested by the user. +#' @param cluster_source Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. +#' @param custom_tags Additional tags for cluster resources. +#' @param data_security_mode Data security mode decides what data governance model to use when accessing data from a cluster. +#' @param docker_image This field has no description yet. +#' @param driver_instance_pool_id The optional ID of the instance pool to which the driver of the cluster belongs.
+#' @param driver_node_type_id The node type of the Spark driver. +#' @param enable_elastic_disk Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space. +#' @param enable_local_disk_encryption Whether to enable LUKS on cluster VMs' local disks. +#' @param gcp_attributes Attributes related to clusters running on Google Cloud Platform. +#' @param init_scripts The configuration for storing init scripts. +#' @param instance_pool_id The optional ID of the instance pool to which the cluster belongs. +#' @param node_type_id This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. +#' @param num_workers Number of worker nodes that this cluster should have. +#' @param policy_id The ID of the cluster policy used to create the cluster if applicable. +#' @param runtime_engine Decides which runtime engine to use, e.g. +#' @param single_user_name Single user name if data_security_mode is `SINGLE_USER`. +#' @param spark_conf An object containing a set of optional, user-specified Spark configuration key-value pairs. +#' @param spark_env_vars An object containing a set of optional, user-specified environment variable key-value pairs. +#' @param spark_version Required. The Spark version of the cluster, e.g. +#' @param ssh_public_keys SSH public key contents that will be added to each Spark node in this cluster. +#' @param workload_type This field has no description yet. +#' +#' @rdname clustersEditAndWait +#' @export +clustersEditAndWait <- function(client, cluster_id, spark_version, apply_policy_default_values = NULL, + autoscale = NULL, autotermination_minutes = NULL, aws_attributes = NULL, azure_attributes = NULL, + cluster_log_conf = NULL, cluster_name = NULL, cluster_source = NULL, custom_tags = NULL, + data_security_mode = NULL, docker_image = NULL, driver_instance_pool_id = NULL, + driver_node_type_id = NULL, enable_elastic_disk = NULL, enable_local_disk_encryption = NULL, + gcp_attributes = NULL, init_scripts = NULL, instance_pool_id = NULL, node_type_id = NULL, + num_workers = NULL, policy_id = NULL, runtime_engine = NULL, single_user_name = NULL, + spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, workload_type = NULL, + timeout = 20, callback = cli_reporter) { + body <- list(apply_policy_default_values = apply_policy_default_values, autoscale = autoscale, + autotermination_minutes = autotermination_minutes, aws_attributes = aws_attributes, + azure_attributes = azure_attributes, cluster_id = cluster_id, cluster_log_conf = cluster_log_conf, + cluster_name = cluster_name, cluster_source = cluster_source, custom_tags = custom_tags, + data_security_mode = data_security_mode, docker_image = docker_image, driver_instance_pool_id = driver_instance_pool_id, + driver_node_type_id = driver_node_type_id, enable_elastic_disk = enable_elastic_disk, + enable_local_disk_encryption = enable_local_disk_encryption, gcp_attributes = gcp_attributes, + init_scripts = init_scripts, instance_pool_id = instance_pool_id, node_type_id = node_type_id, + num_workers = num_workers, policy_id = policy_id, runtime_engine = runtime_engine, + single_user_name = single_user_name, spark_conf = spark_conf, spark_env_vars = spark_env_vars, + spark_version = spark_version, ssh_public_keys = ssh_public_keys, workload_type = workload_type) + op_response <- client$do("POST", "/api/2.0/clusters/edit", body = body) started <- as.numeric(Sys.time()) target_states <-
c("RUNNING", c()) failure_states <- c("ERROR", "TERMINATED", c()) @@ -728,42 +725,201 @@ clustersStart <- function(client, cluster_id, timeout = 20, callback = cli_repor rlang::abort(msg, call = rlang::caller_env()) } -#' Unpin cluster. + + + + + + + + + +#' Resize cluster. #' -#' Unpinning a cluster will allow the cluster to eventually be removed from the -#' ListClusters API. Unpinning a cluster that is not pinned will have no effect. -#' This API can only be called by workspace admins. +#' Resizes a cluster to have a desired number of workers. This will fail unless +#' the cluster is in a `RUNNING` state. #' @param client Required. Instance of DatabricksClient() - #' +#' @description +#' This is a long-running operation, which blocks until Clusters on Databricks reach +#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Clusters is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. #' -#' @param cluster_id Required. . +#' @param autoscale Parameters needed in order to automatically scale clusters up and down based on load. +#' @param cluster_id Required. The cluster to be resized. +#' @param num_workers Number of worker nodes that this cluster should have. #' -#' @rdname clustersUnpin +#' @rdname clustersResizeAndWait #' @export -clustersUnpin <- function(client, cluster_id) { - body <- list(cluster_id = cluster_id) - client$do("POST", "/api/2.0/clusters/unpin", body = body) +clustersResizeAndWait <- function(client, cluster_id, autoscale = NULL, num_workers = NULL, + timeout = 20, callback = cli_reporter) { + body <- list(autoscale = autoscale, cluster_id = cluster_id, num_workers = num_workers) + op_response <- client$do("POST", "/api/2.0/clusters/resize", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("RUNNING", c()) + failure_states <- c("ERROR", "TERMINATED", c()) + status_message <- "polling..." + attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- clustersGet(client, cluster_id = cluster_id) + status <- poll$state + status_message <- poll$state_message + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) } -#' Update cluster permissions. +#' Restart cluster. #' -#' Updates the permissions on a cluster. Clusters can inherit permissions from -#' their root object. +#' Restarts a Spark cluster with the supplied ID. If the cluster is not +#' currently in a `RUNNING` state, nothing will happen. #' @param client Required. 
Instance of DatabricksClient() +#' +#' @description +#' This is a long-running operation, which blocks until Clusters on Databricks reach +#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Clusters is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param cluster_id Required. The cluster to be restarted. +#' @param restart_user . +#' +#' @rdname clustersRestartAndWait +#' @export +clustersRestartAndWait <- function(client, cluster_id, restart_user = NULL, timeout = 20, + callback = cli_reporter) { + body <- list(cluster_id = cluster_id, restart_user = restart_user) + op_response <- client$do("POST", "/api/2.0/clusters/restart", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("RUNNING", c()) + failure_states <- c("ERROR", "TERMINATED", c()) + status_message <- "polling..." + attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- clustersGet(client, cluster_id = cluster_id) + status <- poll$state + status_message <- poll$state_message + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + + + +#' Start terminated cluster. +#' +#' Starts a terminated Spark cluster with the supplied ID. This works similarly to +#' `createCluster` except: +#' +#' * The previous cluster id and attributes are preserved. * The cluster starts +#' with the last specified cluster size. * If the previous cluster was an +#' autoscaling cluster, the current cluster starts with the minimum number of +#' nodes. * If the cluster is not currently in a `TERMINATED` state, nothing +#' will happen. * Clusters launched to run a job cannot be started. +#' @param client Required. Instance of DatabricksClient() #' +#' @description +#' This is a long-running operation, which blocks until Clusters on Databricks reach +#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Clusters is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. #' -#' @param access_control_list This field has no description yet. -#' @param cluster_id Required. The cluster for which to get or manage permissions. +#' @param cluster_id Required. The cluster to be started.
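+#'
+#' A minimal usage sketch of the blocking variant (the cluster ID below is a
+#' hypothetical placeholder):
+#'
+#' @examples
+#' \dontrun{
+#' client <- DatabricksClient()
+#' # blocks until the cluster reaches RUNNING (up to 20 minutes by default)
+#' info <- clustersStartAndWait(client, cluster_id = "1234-567890-abc123")
+#' info$state
+#' }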
#' -#' @rdname clustersUpdatePermissions +#' @rdname clustersStartAndWait #' @export -clustersUpdatePermissions <- function(client, cluster_id, access_control_list = NULL) { - body <- list(access_control_list = access_control_list) - client$do("PATCH", paste("/api/2.0/permissions/clusters/", cluster_id, sep = ""), - body = body) +clustersStartAndWait <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { + body <- list(cluster_id = cluster_id) + op_response <- client$do("POST", "/api/2.0/clusters/start", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("RUNNING", c()) + failure_states <- c("ERROR", "TERMINATED", c()) + status_message <- "polling..." + attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- clustersGet(client, cluster_id = cluster_id) + status <- poll$state + status_message <- poll$state_message + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::clustersGet(cluster_id=", cluster_id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) } + + diff --git a/R/command_execution.R b/R/command_execution.R index dea790ed..371050d5 100755 --- a/R/command_execution.R +++ b/R/command_execution.R @@ -3,6 +3,108 @@ #' @importFrom stats runif NULL +#' Cancel a command. +#' +#' Cancels a currently running command within an execution context. +#' +#' The command ID is obtained from a prior successful call to __execute__. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id This field has no description yet. +#' @param command_id This field has no description yet. +#' @param context_id This field has no description yet. +#' +#' @rdname commandExecutionCancel +#' @export +commandExecutionCancel <- function(client, cluster_id = NULL, command_id = NULL, + context_id = NULL) { + body <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) + client$do("POST", "/api/1.2/commands/cancel", body = body) +} +#' Get command info. +#' +#' Gets the status of and, if available, the results from a currently executing +#' command. +#' +#' The command ID is obtained from a prior successful call to __execute__. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Required. This field has no description yet. +#' @param command_id Required. This field has no description yet. +#' @param context_id Required. This field has no description yet. +#' +#' @rdname commandExecutionCommandStatus +#' @export +commandExecutionCommandStatus <- function(client, cluster_id, context_id, command_id) { + query <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) + client$do("GET", "/api/1.2/commands/status", query = query) +} +#' Get status. +#' +#' Gets the status for an execution context. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Required. 
This field has no description yet. +#' @param context_id Required. This field has no description yet. +#' +#' @rdname commandExecutionContextStatus +#' @export +commandExecutionContextStatus <- function(client, cluster_id, context_id) { + query <- list(clusterId = cluster_id, contextId = context_id) + client$do("GET", "/api/1.2/contexts/status", query = query) +} +#' Create an execution context. +#' +#' Creates an execution context for running cluster commands. +#' +#' If successful, this method returns the ID of the new execution context. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Running cluster id. +#' @param language This field has no description yet. +#' +#' @rdname commandExecutionCreate +#' @export +commandExecutionCreate <- function(client, cluster_id = NULL, language = NULL) { + body <- list(clusterId = cluster_id, language = language) + client$do("POST", "/api/1.2/contexts/create", body = body) +} +#' Delete an execution context. +#' +#' Deletes an execution context. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Required. This field has no description yet. +#' @param context_id Required. This field has no description yet. +#' +#' @rdname commandExecutionDestroy +#' @export +commandExecutionDestroy <- function(client, cluster_id, context_id) { + body <- list(clusterId = cluster_id, contextId = context_id) + client$do("POST", "/api/1.2/contexts/destroy", body = body) +} +#' Run a command. +#' +#' Runs a cluster command in the given execution context, using the provided +#' language. +#' +#' If successful, it returns an ID for tracking the status of the command's +#' execution. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param cluster_id Running cluster id. +#' @param command Executable code. +#' @param context_id Running context id. +#' @param language This field has no description yet. +#' +#' @rdname commandExecutionExecute +#' @export +commandExecutionExecute <- function(client, cluster_id = NULL, command = NULL, context_id = NULL, + language = NULL) { + body <- list(clusterId = cluster_id, command = command, contextId = context_id, + language = language) + client$do("POST", "/api/1.2/commands/execute", body = body) +} #' Cancel a command. #' #' Cancels a currently running command within an execution context. @@ -18,15 +120,14 @@ NULL #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. - #' #' @param cluster_id This field has no description yet. #' @param command_id This field has no description yet. #' @param context_id This field has no description yet. #' -#' @rdname commandExecutionCancel +#' @rdname commandExecutionCancelAndWait #' @export -commandExecutionCancel <- function(client, cluster_id = NULL, command_id = NULL, +commandExecutionCancelAndWait <- function(client, cluster_id = NULL, command_id = NULL, context_id = NULL, timeout = 20, callback = cli_reporter) { body <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) op_response <- client$do("POST", "/api/1.2/commands/cancel", body = body) @@ -71,45 +172,7 @@ commandExecutionCancel <- function(client, cluster_id = NULL, command_id = NULL, rlang::abort(msg, call = rlang::caller_env()) } -#' Get command info. -#' -#' Gets the status of and, if available, the results from a currently executing -#' command. 
-#' -#' The command ID is obtained from a prior successful call to __execute__. -#' @param client Required. Instance of DatabricksClient() - -#' -#' -#' @param cluster_id Required. This field has no description yet. -#' @param command_id Required. This field has no description yet. -#' @param context_id Required. This field has no description yet. -#' -#' @rdname commandExecutionCommandStatus -#' @export -commandExecutionCommandStatus <- function(client, cluster_id, context_id, command_id) { - query <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) - client$do("GET", "/api/1.2/commands/status", query = query) -} - -#' Get status. -#' -#' Gets the status for an execution context. -#' @param client Required. Instance of DatabricksClient() - - -#' -#' -#' @param cluster_id Required. This field has no description yet. -#' @param context_id Required. This field has no description yet. -#' -#' @rdname commandExecutionContextStatus -#' @export -commandExecutionContextStatus <- function(client, cluster_id, context_id) { - query <- list(clusterId = cluster_id, contextId = context_id) - client$do("GET", "/api/1.2/contexts/status", query = query) -} #' Create an execution context. #' @@ -126,15 +189,14 @@ commandExecutionContextStatus <- function(client, cluster_id, context_id) { #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. - #' #' @param cluster_id Running cluster id. #' @param language This field has no description yet. #' -#' @rdname commandExecutionCreate +#' @rdname commandExecutionCreateAndWait #' @export -commandExecutionCreate <- function(client, cluster_id = NULL, language = NULL, timeout = 20, - callback = cli_reporter) { +commandExecutionCreateAndWait <- function(client, cluster_id = NULL, language = NULL, + timeout = 20, callback = cli_reporter) { body <- list(clusterId = cluster_id, language = language) op_response <- client$do("POST", "/api/1.2/contexts/create", body = body) started <- as.numeric(Sys.time()) @@ -174,23 +236,6 @@ commandExecutionCreate <- function(client, cluster_id = NULL, language = NULL, t rlang::abort(msg, call = rlang::caller_env()) } -#' Delete an execution context. -#' -#' Deletes an execution context. -#' @param client Required. Instance of DatabricksClient() - - -#' -#' -#' @param cluster_id Required. This field has no description yet. -#' @param context_id Required. This field has no description yet. -#' -#' @rdname commandExecutionDestroy -#' @export -commandExecutionDestroy <- function(client, cluster_id, context_id) { - body <- list(clusterId = cluster_id, contextId = context_id) - client$do("POST", "/api/1.2/contexts/destroy", body = body) -} #' Run a command. #' @@ -209,17 +254,16 @@ commandExecutionDestroy <- function(client, cluster_id, context_id) { #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. - #' #' @param cluster_id Running cluster id. #' @param command Executable code. #' @param context_id Running context id. #' @param language This field has no description yet. 
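+#'
+#' A minimal usage sketch chaining the blocking helpers (the cluster ID is a
+#' hypothetical placeholder, and the shape of the returned objects is assumed):
+#'
+#' @examples
+#' \dontrun{
+#' client <- DatabricksClient()
+#' ctx <- commandExecutionCreateAndWait(client, cluster_id = "1234-567890-abc123",
+#'   language = "python")
+#' # assumes the polled context status carries its id in `ctx$id`
+#' commandExecutionExecuteAndWait(client, cluster_id = "1234-567890-abc123",
+#'   context_id = ctx$id, language = "python", command = "print(1 + 1)")
+#' }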
#' -#' @rdname commandExecutionExecute +#' @rdname commandExecutionExecuteAndWait #' @export -commandExecutionExecute <- function(client, cluster_id = NULL, command = NULL, context_id = NULL, - language = NULL, timeout = 20, callback = cli_reporter) { +commandExecutionExecuteAndWait <- function(client, cluster_id = NULL, command = NULL, + context_id = NULL, language = NULL, timeout = 20, callback = cli_reporter) { body <- list(clusterId = cluster_id, command = command, contextId = context_id, language = language) op_response <- client$do("POST", "/api/1.2/commands/execute", body = body) diff --git a/R/connections.R b/R/connections.R index 3f5d987b..18fa790c 100755 --- a/R/connections.R +++ b/R/connections.R @@ -11,9 +11,6 @@ NULL #' specify connection details and configurations for interaction with the #' external server. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param connection_type Required. The type of connection. @@ -30,14 +27,10 @@ connectionsCreate <- function(client, name, connection_type, options, comment = options = options, properties = properties, read_only = read_only) client$do("POST", "/api/2.1/unity-catalog/connections", body = body) } - #' Delete a connection. #' #' Deletes the connection that matches the supplied name. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the connection to be deleted. #' @@ -47,14 +40,10 @@ connectionsDelete <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/connections/", name, sep = "")) } - #' Get a connection. #' #' Gets a connection from it's name. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the connection. #' @@ -64,7 +53,6 @@ connectionsGet <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/connections/", name, sep = "")) } - #' List connections. #' #' List all connections. @@ -80,14 +68,10 @@ connectionsList <- function(client) { return(json$connections) } - #' Update a connection. #' #' Updates the connection that matches the supplied name. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the connection. #' @param new_name New name for the connection. @@ -102,3 +86,7 @@ connectionsUpdate <- function(client, name, options, new_name = NULL, owner = NU body = body) } + + + + diff --git a/R/credentials_manager.R b/R/credentials_manager.R index 15bfa7c0..ca2c8cac 100755 --- a/R/credentials_manager.R +++ b/R/credentials_manager.R @@ -8,9 +8,6 @@ NULL #' Exchange tokens with an Identity Provider to get a new access token. It #' allows specifying scopes to determine token permissions. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param partition_id Required. The partition of Credentials store. #' @param scopes Required. Array of scopes for the token request. diff --git a/R/csp_enablement.R b/R/csp_enablement.R index 68fc14bd..5a62fad1 100755 --- a/R/csp_enablement.R +++ b/R/csp_enablement.R @@ -7,8 +7,6 @@ NULL #' #' Gets the compliance security profile setting. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. #' @@ -19,7 +17,6 @@ cspEnablementGet <- function(client, etag = NULL) { client$do("GET", "/api/2.0/settings/types/shield_csp_enablement_ws_db/names/default", query = query) } - #' Update the compliance security profile setting. 
#' #' Updates the compliance security profile setting for the workspace. A fresh @@ -28,9 +25,6 @@ cspEnablementGet <- function(client, etag = NULL) { #' request. If the setting is updated concurrently, `PATCH` fails with 409 and #' the request must be retried by using the fresh etag in the 409 response. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param allow_missing Required. This should always be set to true for Settings API. #' @param field_mask Required. Field mask is required to be passed into the PATCH request. @@ -44,3 +38,4 @@ cspEnablementUpdate <- function(client, allow_missing, setting, field_mask) { body = body) } + diff --git a/R/dashboard_widgets.R b/R/dashboard_widgets.R index 27fc3f44..be049891 100755 --- a/R/dashboard_widgets.R +++ b/R/dashboard_widgets.R @@ -5,9 +5,6 @@ NULL #' Add widget to a dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. Dashboard ID returned by :method:dashboards/create. #' @param options Required. This field has no description yet. @@ -23,12 +20,8 @@ dashboardWidgetsCreate <- function(client, dashboard_id, options, width, text = width = width) client$do("POST", "/api/2.0/preview/sql/widgets", body = body) } - #' Remove widget. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Widget ID returned by :method:dashboardwidgets/create. #' @@ -38,12 +31,8 @@ dashboardWidgetsDelete <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/sql/widgets/", id, sep = "")) } - #' Update existing widget. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. Dashboard ID returned by :method:dashboards/create. #' @param id Required. Widget ID returned by :method:dashboardwidgets/create. @@ -61,3 +50,5 @@ dashboardWidgetsUpdate <- function(client, id, dashboard_id, options, width, tex client$do("POST", paste("/api/2.0/preview/sql/widgets/", id, sep = ""), body = body) } + + diff --git a/R/dashboards.R b/R/dashboards.R index 5f48958b..943c64f4 100755 --- a/R/dashboards.R +++ b/R/dashboards.R @@ -5,9 +5,6 @@ NULL #' Create a dashboard object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_filters_enabled Indicates whether the dashboard filters are enabled. #' @param is_favorite Indicates whether this dashboard object should appear in the current user's favorites list. @@ -24,15 +21,11 @@ dashboardsCreate <- function(client, name, dashboard_filters_enabled = NULL, is_ name = name, parent = parent, run_as_role = run_as_role, tags = tags) client$do("POST", "/api/2.0/preview/sql/dashboards", body = body) } - #' Remove a dashboard. #' #' Moves a dashboard to the trash. Trashed dashboards do not appear in list #' views or searches, and cannot be shared. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. This field has no description yet. #' @@ -42,15 +35,11 @@ dashboardsDelete <- function(client, dashboard_id) { client$do("DELETE", paste("/api/2.0/preview/sql/dashboards/", dashboard_id, sep = "")) } - #' Retrieve a definition. #' #' Returns a JSON representation of a dashboard object, including its #' visualization and query objects. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. This field has no description yet. 
#' @@ -60,7 +49,6 @@ dashboardsGet <- function(client, dashboard_id) { client$do("GET", paste("/api/2.0/preview/sql/dashboards/", dashboard_id, sep = "")) } - #' Get dashboard objects. #' #' Fetch a paginated list of dashboard objects. @@ -68,8 +56,6 @@ dashboardsGet <- function(client, dashboard_id) { #' ### **Warning: Calling this API concurrently 10 or more times could result in #' throttling, service degradation, or a temporary ban.** #' @param client Required. Instance of DatabricksClient() - - #' #' @param order Name of dashboard attribute to order by. #' @param page Page number to retrieve. @@ -99,14 +85,10 @@ dashboardsList <- function(client, order = NULL, page = NULL, page_size = NULL, return(results) } - #' Restore a dashboard. #' #' A restored dashboard appears in list views and searches and can be shared. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. This field has no description yet. #' @@ -117,7 +99,6 @@ dashboardsRestore <- function(client, dashboard_id) { client$do("POST", paste("/api/2.0/preview/sql/dashboards/trash/", dashboard_id, sep = "")) } - #' Change a dashboard definition. #' #' Modify this dashboard definition. This operation only affects attributes of @@ -125,9 +106,6 @@ dashboardsRestore <- function(client, dashboard_id) { #' #' **Note**: You cannot undo this operation. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. This field has no description yet. #' @param name The title of this dashboard that appears in list views and at the top of the dashboard page. @@ -141,3 +119,8 @@ dashboardsUpdate <- function(client, dashboard_id, name = NULL, run_as_role = NU body = body) } + + + + + diff --git a/R/dbfs.R b/R/dbfs.R index b33b1363..81ef807d 100755 --- a/R/dbfs.R +++ b/R/dbfs.R @@ -12,9 +12,6 @@ NULL #' If the block of data exceeds 1 MB, this call will throw an exception with #' ``MAX_BLOCK_SIZE_EXCEEDED``. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param data Required. The base64-encoded data to append to the stream. #' @param handle Required. The handle on an open stream. @@ -25,15 +22,11 @@ dbfsAddBlock <- function(client, handle, data) { body <- list(data = data, handle = handle) client$do("POST", "/api/2.0/dbfs/add-block", body = body) } - #' Close the stream. #' #' Closes the stream specified by the input handle. If the handle does not #' exist, this call throws an exception with ``RESOURCE_DOES_NOT_EXIST``. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param handle Required. The handle on an open stream. #' @@ -43,7 +36,6 @@ dbfsClose <- function(client, handle) { body <- list(handle = handle) client$do("POST", "/api/2.0/dbfs/close", body = body) } - #' Open a stream. #' #' Opens a stream to write to a file and returns a handle to this stream. There @@ -57,9 +49,6 @@ dbfsClose <- function(client, handle) { #' ``add-block`` calls with the handle you have. 3. Issue a ``close`` call with #' the handle you have. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param overwrite The flag that specifies whether to overwrite existing file/files. #' @param path Required. The path of the new file. @@ -70,7 +59,6 @@ dbfsCreate <- function(client, path, overwrite = NULL) { body <- list(overwrite = overwrite, path = path) client$do("POST", "/api/2.0/dbfs/create", body = body) } - #' Delete a file/directory. 
#' #' Delete the file or directory (optionally recursively delete all files in the @@ -92,9 +80,6 @@ dbfsCreate <- function(client, path, overwrite = NULL) { #' such as selective deletes, and the possibility to automate periodic delete #' jobs. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Required. The path of the file or directory to delete. #' @param recursive Whether or not to recursively delete the directory's contents. @@ -105,15 +90,11 @@ dbfsDelete <- function(client, path, recursive = NULL) { body <- list(path = path, recursive = recursive) client$do("POST", "/api/2.0/dbfs/delete", body = body) } - #' Get the information of a file or directory. #' #' Gets the file information for a file or directory. If the file or directory #' does not exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Required. The path of the file or directory. #' @@ -123,7 +104,6 @@ dbfsGetStatus <- function(client, path) { query <- list(path = path) client$do("GET", "/api/2.0/dbfs/get-status", query = query) } - #' List directory contents or file details. #' #' List the contents of a directory, or details of the file. If the file or @@ -138,9 +118,6 @@ dbfsGetStatus <- function(client, path) { #' system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), #' which provides the same functionality without timing out. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Required. The path of the file or directory. #' @@ -155,7 +132,6 @@ dbfsList <- function(client, path) { return(json$files) } - #' Create a directory. #' #' Creates the given directory and necessary parent directories if they do not @@ -164,9 +140,6 @@ dbfsList <- function(client, path) { #' this operation fails, it might have succeeded in creating some of the #' necessary parent directories. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Required. The path of the new directory. #' @@ -176,7 +149,6 @@ dbfsMkdirs <- function(client, path) { body <- list(path = path) client$do("POST", "/api/2.0/dbfs/mkdirs", body = body) } - #' Move a file. #' #' Moves a file from one location to another location within DBFS. If the source @@ -185,9 +157,6 @@ dbfsMkdirs <- function(client, path) { #' this call throws an exception with `RESOURCE_ALREADY_EXISTS`. If the given #' source path is a directory, this call always recursively moves all files. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param destination_path Required. The destination path of the file or directory. #' @param source_path Required. The source path of the file or directory. @@ -198,7 +167,6 @@ dbfsMove <- function(client, source_path, destination_path) { body <- list(destination_path = destination_path, source_path = source_path) client$do("POST", "/api/2.0/dbfs/move", body = body) } - #' Upload a file. #' #' Uploads a file through the use of multipart form post. It is mainly used for @@ -214,9 +182,6 @@ dbfsMove <- function(client, source_path, destination_path) { #' If you want to upload large files, use the streaming upload. For details, see #' :method:dbfs/create, :method:dbfs/addBlock, :method:dbfs/close. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param contents This parameter might be absent, and instead a posted file will be used. 
#' @param overwrite The flag that specifies whether to overwrite existing file/files. @@ -228,7 +193,6 @@ dbfsPut <- function(client, path, contents = NULL, overwrite = NULL) { body <- list(contents = contents, overwrite = overwrite, path = path) client$do("POST", "/api/2.0/dbfs/put", body = body) } - #' Get the contents of a file. #' #' Returns the contents of a file. If the file does not exist, this call throws @@ -240,9 +204,6 @@ dbfsPut <- function(client, path, contents = NULL, overwrite = NULL) { #' If `offset + length` exceeds the number of bytes in a file, it reads the #' contents until the end of file. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param length The number of bytes to read starting from the offset. #' @param offset The offset to read from in bytes. @@ -255,3 +216,12 @@ dbfsRead <- function(client, path, length = NULL, offset = NULL) { client$do("GET", "/api/2.0/dbfs/read", query = query) } + + + + + + + + + diff --git a/R/dbsql_permissions.R b/R/dbsql_permissions.R index f53b73d1..10ac74dd 100755 --- a/R/dbsql_permissions.R +++ b/R/dbsql_permissions.R @@ -8,9 +8,6 @@ NULL #' Gets a JSON representation of the access control list (ACL) for a specified #' object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param object_id Required. Object ID. #' @param object_type Required. The type of object permissions to check. @@ -22,15 +19,11 @@ dbsqlPermissionsGet <- function(client, object_type, object_id) { client$do("GET", paste("/api/2.0/preview/sql/permissions/", object_type, "/", object_id, sep = "")) } - #' Set object ACL. #' #' Sets the access control list (ACL) for a specified object. This operation #' will complete rewrite the ACL. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param object_id Required. Object ID. @@ -43,15 +36,11 @@ dbsqlPermissionsSet <- function(client, object_type, object_id, access_control_l client$do("POST", paste("/api/2.0/preview/sql/permissions/", object_type, "/", object_id, sep = ""), body = body) } - #' Transfer object ownership. #' #' Transfers ownership of a dashboard, query, or alert to an active user. #' Requires an admin API key. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param new_owner Email address for the new owner, who must exist in the workspace. #' @param object_id Required. The ID of the object on which to change ownership. @@ -65,3 +54,5 @@ dbsqlPermissionsTransferOwnership <- function(client, object_type, object_id, ne object_id, "/transfer", , sep = ""), body = body) } + + diff --git a/R/default_namespace.R b/R/default_namespace.R index 284611da..494d3e48 100755 --- a/R/default_namespace.R +++ b/R/default_namespace.R @@ -11,8 +11,6 @@ NULL #' setting is updated/deleted concurrently, `DELETE` fails with 409 and the #' request must be retried by using the fresh etag in the 409 response. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. #' @@ -23,13 +21,10 @@ defaultNamespaceDelete <- function(client, etag = NULL) { client$do("DELETE", "/api/2.0/settings/types/default_namespace_ws/names/default", query = query) } - #' Get the default namespace setting. #' #' Gets the default namespace setting. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. 
#' @@ -40,7 +35,6 @@ defaultNamespaceGet <- function(client, etag = NULL) { client$do("GET", "/api/2.0/settings/types/default_namespace_ws/names/default", query = query) } - #' Update the default namespace setting. #' #' Updates the default namespace setting for the workspace. A fresh etag needs @@ -51,9 +45,6 @@ defaultNamespaceGet <- function(client, etag = NULL) { #' request. If the setting is updated concurrently, `PATCH` fails with 409 and #' the request must be retried by using the fresh etag in the 409 response. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param allow_missing Required. This should always be set to true for Settings API. #' @param field_mask Required. Field mask is required to be passed into the PATCH request. @@ -67,3 +58,5 @@ defaultNamespaceUpdate <- function(client, allow_missing, setting, field_mask) { body = body) } + + diff --git a/R/esm_enablement.R b/R/esm_enablement.R index 7839e49b..7827429f 100755 --- a/R/esm_enablement.R +++ b/R/esm_enablement.R @@ -7,8 +7,6 @@ NULL #' #' Gets the enhanced security monitoring setting. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. #' @@ -19,7 +17,6 @@ esmEnablementGet <- function(client, etag = NULL) { client$do("GET", "/api/2.0/settings/types/shield_esm_enablement_ws_db/names/default", query = query) } - #' Update the enhanced security monitoring setting. #' #' Updates the enhanced security monitoring setting for the workspace. A fresh @@ -28,9 +25,6 @@ esmEnablementGet <- function(client, etag = NULL) { #' request. If the setting is updated concurrently, `PATCH` fails with 409 and #' the request must be retried by using the fresh etag in the 409 response. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param allow_missing Required. This should always be set to true for Settings API. #' @param field_mask Required. Field mask is required to be passed into the PATCH request. @@ -44,3 +38,4 @@ esmEnablementUpdate <- function(client, allow_missing, setting, field_mask) { body = body) } + diff --git a/R/experiments.R b/R/experiments.R index 5af5fece..83200af1 100755 --- a/R/experiments.R +++ b/R/experiments.R @@ -12,9 +12,6 @@ NULL #' #' Throws `RESOURCE_ALREADY_EXISTS` if a experiment with the given name exists. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param artifact_location Location where all artifacts for the experiment are stored. #' @param name Required. Experiment name. @@ -26,7 +23,6 @@ experimentsCreateExperiment <- function(client, name, artifact_location = NULL, body <- list(artifact_location = artifact_location, name = name, tags = tags) client$do("POST", "/api/2.0/mlflow/experiments/create", body = body) } - #' Create a run. #' #' Creates a new run within an experiment. A run is usually a single execution @@ -34,8 +30,6 @@ experimentsCreateExperiment <- function(client, name, artifact_location = NULL, #' `mlflowParam`, `mlflowMetric` and `mlflowRunTag` associated with a single #' execution. #' @param client Required. Instance of DatabricksClient() - - #' #' @param experiment_id ID of the associated experiment. #' @param start_time Unix timestamp in milliseconds of when the run started. @@ -50,16 +44,12 @@ experimentsCreateRun <- function(client, experiment_id = NULL, start_time = NULL user_id = user_id) client$do("POST", "/api/2.0/mlflow/runs/create", body = body) } - #' Delete an experiment. 
#' #' Marks an experiment and associated metadata, runs, metrics, params, and tags #' for deletion. If the experiment uses FileStore, artifacts associated with #' experiment are also deleted. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. ID of the associated experiment. #' @@ -69,14 +59,10 @@ experimentsDeleteExperiment <- function(client, experiment_id) { body <- list(experiment_id = experiment_id) client$do("POST", "/api/2.0/mlflow/experiments/delete", body = body) } - #' Delete a run. #' #' Marks a run for deletion. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param run_id Required. ID of the run to delete. #' @@ -86,7 +72,6 @@ experimentsDeleteRun <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/delete", body = body) } - #' Delete runs by creation time. #' #' Bulk delete runs in an experiment that were created prior to or at the @@ -94,9 +79,6 @@ experimentsDeleteRun <- function(client, run_id) { #' from a Databricks Notebook in Python, you can use the client code snippet on #' https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. The ID of the experiment containing the runs to delete. #' @param max_runs An optional positive integer indicating the maximum number of runs to delete. @@ -108,15 +90,11 @@ experimentsDeleteRuns <- function(client, experiment_id, max_timestamp_millis, m body <- list(experiment_id = experiment_id, max_runs = max_runs, max_timestamp_millis = max_timestamp_millis) client$do("POST", "/api/2.0/mlflow/databricks/runs/delete-runs", body = body) } - #' Delete a tag. #' #' Deletes a tag on a run. Tags are run metadata that can be updated during a #' run and after a run completes. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the tag. #' @param run_id Required. ID of the run that the tag was logged under. @@ -127,7 +105,6 @@ experimentsDeleteTag <- function(client, run_id, key) { body <- list(key = key, run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/delete-tag", body = body) } - #' Get metadata. #' #' Gets metadata for an experiment. @@ -140,9 +117,6 @@ experimentsDeleteTag <- function(client, run_id, key) { #' Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name #' exists. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_name Required. Name of the associated experiment. #' @@ -152,14 +126,10 @@ experimentsGetByName <- function(client, experiment_name) { query <- list(experiment_name = experiment_name) client$do("GET", "/api/2.0/mlflow/experiments/get-by-name", query = query) } - #' Get an experiment. #' #' Gets metadata for an experiment. This method works on deleted experiments. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. ID of the associated experiment. #' @@ -169,14 +139,10 @@ experimentsGetExperiment <- function(client, experiment_id) { query <- list(experiment_id = experiment_id) client$do("GET", "/api/2.0/mlflow/experiments/get", query = query) } - #' Get history of a given metric within a run. #' #' Gets a list of all values for the specified metric for a given run. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param max_results Maximum number of Metric records to return per paginated request. 
#' @param metric_key Required. Name of the metric. @@ -209,14 +175,10 @@ experimentsGetHistory <- function(client, metric_key, max_results = NULL, page_t return(results) } - #' Get experiment permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. The experiment for which to get or manage permissions. #' @@ -227,15 +189,11 @@ experimentsGetPermissionLevels <- function(client, experiment_id) { client$do("GET", paste("/api/2.0/permissions/experiments/", experiment_id, "/permissionLevels", , sep = "")) } - #' Get experiment permissions. #' #' Gets the permissions of an experiment. Experiments can inherit permissions #' from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. The experiment for which to get or manage permissions. #' @@ -245,7 +203,6 @@ experimentsGetPermissions <- function(client, experiment_id) { client$do("GET", paste("/api/2.0/permissions/experiments/", experiment_id, sep = "")) } - #' Get a run. #' #' Gets the metadata, metrics, params, and tags for a run. In the case where @@ -255,9 +212,6 @@ experimentsGetPermissions <- function(client, experiment_id) { #' If there are multiple values with the latest timestamp, return the maximum of #' these values. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param run_id Required. ID of the run to fetch. #' @param run_uuid Deprecated, use run_id instead. ID of the run to fetch. @@ -268,14 +222,11 @@ experimentsGetRun <- function(client, run_id, run_uuid = NULL) { query <- list(run_id = run_id, run_uuid = run_uuid) client$do("GET", "/api/2.0/mlflow/runs/get", query = query) } - #' Get all artifacts. #' #' List artifacts for a run. Takes an optional `artifact_path` prefix. If it is #' specified, the response contains only artifacts with the specified prefix.', #' @param client Required. Instance of DatabricksClient() - - #' #' @param page_token Token indicating the page of artifact results to fetch. #' @param path Filter artifacts matching this path (a relative path from the root artifact directory). @@ -306,13 +257,10 @@ experimentsListArtifacts <- function(client, page_token = NULL, path = NULL, run return(results) } - #' List experiments. #' #' Gets a list of all experiments. #' @param client Required. Instance of DatabricksClient() - - #' #' @param max_results Maximum number of experiments desired. #' @param page_token Token indicating the page of experiments to fetch. @@ -342,7 +290,6 @@ experimentsListExperiments <- function(client, max_results = NULL, page_token = return(results) } - #' Log a batch. #' #' Logs a batch of metrics, params, and tags for a run. If any data failed to be @@ -383,8 +330,6 @@ experimentsListExperiments <- function(client, max_results = NULL, page_token = #' * Metric keys, param keys, and tag keys can be up to 250 characters in length #' * Parameter and tag values can be up to 250 characters in length #' @param client Required. Instance of DatabricksClient() - - #' #' @param metrics Metrics to log. #' @param params Params to log. @@ -398,14 +343,11 @@ experimentsLogBatch <- function(client, metrics = NULL, params = NULL, run_id = body <- list(metrics = metrics, params = params, run_id = run_id, tags = tags) client$do("POST", "/api/2.0/mlflow/runs/log-batch", body = body) } - #' Log inputs to a run. 
#' #' **NOTE:** Experimental: This API may change or be removed in a future release #' without warning. #' @param client Required. Instance of DatabricksClient() - - #' #' @param datasets Dataset inputs. #' @param run_id ID of the run to log under. @@ -416,16 +358,12 @@ experimentsLogInputs <- function(client, datasets = NULL, run_id = NULL) { body <- list(datasets = datasets, run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/log-inputs", body = body) } - #' Log a metric. #' #' Logs a metric for a run. A metric is a key-value pair (string key, float #' value) with an associated timestamp. Examples include the various metrics #' that represent ML model accuracy. A metric can be logged multiple times. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the metric. #' @param run_id ID of the run under which to log the metric. @@ -442,14 +380,11 @@ experimentsLogMetric <- function(client, key, value, timestamp, run_id = NULL, r value = value) client$do("POST", "/api/2.0/mlflow/runs/log-metric", body = body) } - #' Log a model. #' #' **NOTE:** Experimental: This API may change or be removed in a future release #' without warning. #' @param client Required. Instance of DatabricksClient() - - #' #' @param model_json MLmodel file in json format. #' @param run_id ID of the run to log under. @@ -460,7 +395,6 @@ experimentsLogModel <- function(client, model_json = NULL, run_id = NULL) { body <- list(model_json = model_json, run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/log-model", body = body) } - #' Log a param. #' #' Logs a param used for a run. A param is a key-value pair (string key, string @@ -468,9 +402,6 @@ experimentsLogModel <- function(client, model_json = NULL, run_id = NULL) { #' constant dates and values used in an ETL pipeline. A param can be logged only #' once for a run. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the param. #' @param run_id ID of the run under which to log the param. @@ -483,7 +414,6 @@ experimentsLogParam <- function(client, key, value, run_id = NULL, run_uuid = NU body <- list(key = key, run_id = run_id, run_uuid = run_uuid, value = value) client$do("POST", "/api/2.0/mlflow/runs/log-parameter", body = body) } - #' Restores an experiment. #' #' Restore an experiment marked for deletion. This also restores associated @@ -493,9 +423,6 @@ experimentsLogParam <- function(client, key, value, run_id = NULL, run_uuid = NU #' Throws `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was #' permanently deleted. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. ID of the associated experiment. #' @@ -505,14 +432,10 @@ experimentsRestoreExperiment <- function(client, experiment_id) { body <- list(experiment_id = experiment_id) client$do("POST", "/api/2.0/mlflow/experiments/restore", body = body) } - #' Restore a run. #' #' Restores a deleted run. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param run_id Required. ID of the run to restore. #' @@ -522,7 +445,6 @@ experimentsRestoreRun <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/restore", body = body) } - #' Restore runs by deletion time. 
#' #' Bulk restore runs in an experiment that were deleted no earlier than the @@ -530,9 +452,6 @@ experimentsRestoreRun <- function(client, run_id) { #' from a Databricks Notebook in Python, you can use the client code snippet on #' https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. The ID of the experiment containing the runs to restore. #' @param max_runs An optional positive integer indicating the maximum number of runs to restore. @@ -544,13 +463,10 @@ experimentsRestoreRuns <- function(client, experiment_id, min_timestamp_millis, body <- list(experiment_id = experiment_id, max_runs = max_runs, min_timestamp_millis = min_timestamp_millis) client$do("POST", "/api/2.0/mlflow/databricks/runs/restore-runs", body = body) } - #' Search experiments. #' #' Searches for experiments that satisfy specified search criteria. #' @param client Required. Instance of DatabricksClient() - - #' #' @param filter String representing a SQL filter condition (e.g. #' @param max_results Maximum number of experiments desired. @@ -583,15 +499,12 @@ experimentsSearchExperiments <- function(client, filter = NULL, max_results = NU return(results) } - #' Search for runs. #' #' Searches for runs that satisfy expressions. #' #' Search expressions can use `mlflowMetric` and `mlflowParam` keys. #' @param client Required. Instance of DatabricksClient() - - #' #' @param experiment_ids List of experiment IDs to search over. #' @param filter A filter expression over params, metrics, and tags, that allows returning a subset of runs. @@ -625,15 +538,11 @@ experimentsSearchRuns <- function(client, experiment_ids = NULL, filter = NULL, return(results) } - #' Set a tag. #' #' Sets a tag on an experiment. Experiment tags are metadata that can be #' updated. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. ID of the experiment under which to log the tag. #' @param key Required. Name of the tag. @@ -645,15 +554,11 @@ experimentsSetExperimentTag <- function(client, experiment_id, key, value) { body <- list(experiment_id = experiment_id, key = key, value = value) client$do("POST", "/api/2.0/mlflow/experiments/set-experiment-tag", body = body) } - #' Set experiment permissions. #' #' Sets permissions on an experiment. Experiments can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param experiment_id Required. The experiment for which to get or manage permissions. @@ -665,15 +570,11 @@ experimentsSetPermissions <- function(client, experiment_id, access_control_list client$do("PUT", paste("/api/2.0/permissions/experiments/", experiment_id, sep = ""), body = body) } - #' Set a tag. #' #' Sets a tag on a run. Tags are run metadata that can be updated during a run #' and after a run completes. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the tag. #' @param run_id ID of the run under which to log the tag. @@ -686,14 +587,10 @@ experimentsSetTag <- function(client, key, value, run_id = NULL, run_uuid = NULL body <- list(key = key, run_id = run_id, run_uuid = run_uuid, value = value) client$do("POST", "/api/2.0/mlflow/runs/set-tag", body = body) } - #' Update an experiment. #' #' Updates experiment metadata. #' @param client Required.
Instance of DatabricksClient() - - -#' #' #' @param experiment_id Required. ID of the associated experiment. #' @param new_name If provided, the experiment's name is changed to the new name. @@ -704,15 +601,11 @@ experimentsUpdateExperiment <- function(client, experiment_id, new_name = NULL) body <- list(experiment_id = experiment_id, new_name = new_name) client$do("POST", "/api/2.0/mlflow/experiments/update", body = body) } - #' Update experiment permissions. #' #' Updates the permissions on an experiment. Experiments can inherit permissions #' from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param experiment_id Required. The experiment for which to get or manage permissions. @@ -724,13 +617,10 @@ experimentsUpdatePermissions <- function(client, experiment_id, access_control_l client$do("PATCH", paste("/api/2.0/permissions/experiments/", experiment_id, sep = ""), body = body) } - #' Update a run. #' #' Updates run metadata. #' @param client Required. Instance of DatabricksClient() - - #' #' @param end_time Unix timestamp in milliseconds of when the run ended. #' @param run_id ID of the run to update. @@ -745,3 +635,32 @@ experimentsUpdateRun <- function(client, end_time = NULL, run_id = NULL, run_uui client$do("POST", "/api/2.0/mlflow/runs/update", body = body) } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/R/external_locations.R b/R/external_locations.R index de502617..7838d4c9 100755 --- a/R/external_locations.R +++ b/R/external_locations.R @@ -9,9 +9,6 @@ NULL #' metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both #' the metastore and the associated storage credential. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_point The AWS access point to use when accessing s3 for this external location. #' @param comment User-provided free-form text description. @@ -31,15 +28,11 @@ externalLocationsCreate <- function(client, name, url, credential_name, access_p skip_validation = skip_validation, url = url) client$do("POST", "/api/2.1/unity-catalog/external-locations", body = body) } - #' Delete an external location. #' #' Deletes the specified external location from the metastore. The caller must #' be the owner of the external location. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param force Force deletion even if there are dependent external tables or mounts. #' @param name Required. Name of the external location. @@ -51,16 +44,12 @@ externalLocationsDelete <- function(client, name, force = NULL) { client$do("DELETE", paste("/api/2.1/unity-catalog/external-locations/", name, sep = ""), query = query) } - #' Get an external location. #' #' Gets an external location from the metastore. The caller must be either a #' metastore admin, the owner of the external location, or a user that has some #' privilege on the external location. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param include_browse Whether to include external locations in the response for which the principal can only access selective metadata for. #' @param name Required. Name of the external location. @@ -72,7 +61,6 @@ externalLocationsGet <- function(client, name, include_browse = NULL) { client$do("GET", paste("/api/2.1/unity-catalog/external-locations/", name, sep = ""), query = query) } - #' List external locations.
#' #' Gets an array of external locations (__ExternalLocationInfo__ objects) from @@ -81,8 +69,6 @@ externalLocationsGet <- function(client, name, include_browse = NULL) { #' location. There is no guarantee of a specific ordering of the elements in the #' array. #' @param client Required. Instance of DatabricksClient() - - #' #' @param include_browse Whether to include external locations in the response for which the principal can only access selective metadata for. #' @param max_results Maximum number of external locations to return. @@ -112,16 +98,12 @@ externalLocationsList <- function(client, include_browse = NULL, max_results = N return(results) } - #' Update an external location. #' #' Updates an external location in the metastore. The caller must be the owner #' of the external location, or be a metastore admin. In the second case, the #' admin can only update the name of the external location. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_point The AWS access point to use when accessing s3 for this external location. #' @param comment User-provided free-form text description. @@ -148,3 +130,7 @@ externalLocationsUpdate <- function(client, name, access_point = NULL, comment = sep = ""), body = body) } + + + + diff --git a/R/files.R b/R/files.R index 9d851e31..021b1b22 100755 --- a/R/files.R +++ b/R/files.R @@ -10,9 +10,6 @@ NULL #' an existing directory, returns a success response; this method is idempotent #' (it will succeed if the directory already exists). #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param directory_path Required. The absolute path of a directory. #' @@ -22,14 +19,10 @@ filesCreateDirectory <- function(client, directory_path) { client$do("PUT", paste("/api/2.0/fs/directories", directory_path, sep = "")) } - #' Delete a file. #' #' Deletes a file. If the request is successful, there is no response body. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param file_path Required. The absolute path of the file. #' @@ -39,7 +32,6 @@ filesDelete <- function(client, file_path) { client$do("DELETE", paste("/api/2.0/fs/files", file_path, sep = "")) } - #' Delete a directory. #' #' Deletes an empty directory. @@ -48,9 +40,6 @@ filesDelete <- function(client, file_path) { #' be done by listing the directory contents and deleting each file and #' subdirectory recursively. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param directory_path Required. The absolute path of a directory. #' @@ -60,15 +49,11 @@ filesDeleteDirectory <- function(client, directory_path) { client$do("DELETE", paste("/api/2.0/fs/directories", directory_path, sep = "")) } - #' Download a file. #' #' Downloads a file of up to 5 GiB. The file contents are the response body. #' This is a standard HTTP file download, not a JSON RPC. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param file_path Required. The absolute path of the file. #' @@ -78,7 +63,6 @@ filesDownload <- function(client, file_path) { client$do("GET", paste("/api/2.0/fs/files", file_path, sep = "")) } - #' Get directory metadata. #' #' Get the metadata of a directory. The response HTTP headers contain the @@ -91,9 +75,6 @@ filesDownload <- function(client, file_path) { #' will create the directory if it does not exist, and is idempotent (it will #' succeed if the directory already exists). #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param directory_path Required.
The absolute path of a directory. #' @@ -103,15 +84,11 @@ filesGetDirectoryMetadata <- function(client, directory_path) { client$do("HEAD", paste("/api/2.0/fs/directories", directory_path, sep = "")) } - #' Get file metadata. #' #' Get the metadata of a file. The response HTTP headers contain the metadata. #' There is no response body. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param file_path Required. The absolute path of the file. #' @@ -121,15 +98,11 @@ filesGetMetadata <- function(client, file_path) { client$do("HEAD", paste("/api/2.0/fs/files", file_path, sep = "")) } - #' List directory contents. #' #' Returns the contents of a directory. If there is no directory at the #' specified path, the API returns an HTTP 404 error. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param directory_path Required. The absolute path of a directory. #' @param page_size The maximum number of directory entries to return. @@ -160,7 +133,6 @@ filesListDirectoryContents <- function(client, directory_path, page_size = NULL, return(results) } - #' Upload a file. #' #' Uploads a file of up to 5 GiB. The file contents should be sent as the @@ -169,9 +141,6 @@ filesListDirectoryContents <- function(client, directory_path, page_size = NULL, #' exactly the bytes sent in the request body. If the request is successful, #' there is no response body. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param contents This field has no description yet. #' @param file_path Required. The absolute path of the file. @@ -184,3 +153,10 @@ filesUpload <- function(client, file_path, contents, overwrite = NULL) { client$do("PUT", paste("/api/2.0/fs/files", file_path, sep = ""), query = query) } + + + + + + + diff --git a/R/functions.R b/R/functions.R index 2fad039e..7f738059 100755 --- a/R/functions.R +++ b/R/functions.R @@ -11,9 +11,6 @@ NULL #' created: - **USE_CATALOG** on the function's parent catalog - **USE_SCHEMA** #' and **CREATE_FUNCTION** on the function's parent schema #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param function_info Required. Partial __FunctionInfo__ specifying the function to be created. #' @@ -23,7 +20,6 @@ functionsCreate <- function(client, function_info) { body <- list(function_info = function_info) client$do("POST", "/api/2.1/unity-catalog/functions", body = body) } - #' Delete a function. #' #' Deletes the function that matches the supplied name. For the deletion to @@ -34,9 +30,6 @@ functionsCreate <- function(client, function_info) { #' privilege on its parent catalog and the **USE_SCHEMA** privilege on its #' parent schema #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param force Force deletion even if the function is not empty. #' @param name Required. The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). @@ -48,7 +41,6 @@ functionsDelete <- function(client, name, force = NULL) { client$do("DELETE", paste("/api/2.1/unity-catalog/functions/", name, sep = ""), query = query) } - #' Get a function. #' #' Gets a function from within a parent catalog and schema. For the fetch to @@ -59,9 +51,6 @@ functionsDelete <- function(client, name, force = NULL) { #' catalog, the **USE_SCHEMA** privilege on the function's parent schema, and #' the **EXECUTE** privilege on the function itself #' @param client Required.
Instance of DatabricksClient() - - -#' #' #' @param include_browse Whether to include functions in the response for which the principal can only access selective metadata for. #' @param name Required. The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). @@ -73,7 +62,6 @@ functionsGet <- function(client, name, include_browse = NULL) { client$do("GET", paste("/api/2.1/unity-catalog/functions/", name, sep = ""), query = query) } - #' List functions. #' #' List functions within the specified parent catalog and schema. If the user is @@ -84,9 +72,6 @@ functionsGet <- function(client, name, include_browse = NULL) { #' is the owner. There is no guarantee of a specific ordering of the elements in #' the array. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. Name of parent catalog for functions of interest. #' @param include_browse Whether to include functions in the response for which the principal can only access selective metadata for. @@ -119,7 +104,6 @@ functionsList <- function(client, catalog_name, schema_name, include_browse = NU return(results) } - #' Update a function. #' #' Updates the function that matches the supplied name. Only the owner of the @@ -131,9 +115,6 @@ functionsList <- function(client, catalog_name, schema_name, include_browse = NU #' privilege on its parent catalog as well as the **USE_SCHEMA** privilege on #' the function's parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). #' @param owner Username of current owner of function. @@ -146,3 +127,7 @@ functionsUpdate <- function(client, name, owner = NULL) { body = body) } + + + + diff --git a/R/git_credentials.R b/R/git_credentials.R index 913435ed..d15f9436 100755 --- a/R/git_credentials.R +++ b/R/git_credentials.R @@ -10,9 +10,6 @@ NULL #' exists will fail. Use the PATCH endpoint to update existing credentials, or #' the DELETE endpoint to delete existing credentials. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param git_provider Required. Git provider. #' @param git_username Git username. @@ -24,14 +21,10 @@ gitCredentialsCreate <- function(client, git_provider, git_username = NULL, pers body <- list(git_provider = git_provider, git_username = git_username, personal_access_token = personal_access_token) client$do("POST", "/api/2.0/git-credentials", body = body) } - #' Delete a credential. #' #' Deletes the specified Git credential. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param credential_id Required. The ID for the corresponding credential to access. #' @@ -41,14 +34,10 @@ gitCredentialsDelete <- function(client, credential_id) { client$do("DELETE", paste("/api/2.0/git-credentials/", credential_id, sep = "")) } - #' Get a credential entry. #' #' Gets the Git credential with the specified credential ID. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param credential_id Required. The ID for the corresponding credential to access. #' @@ -58,7 +47,6 @@ gitCredentialsGet <- function(client, credential_id) { client$do("GET", paste("/api/2.0/git-credentials/", credential_id, sep = "")) } - #' Get Git credentials. #' #' Lists the calling user's Git credentials. 
One credential per user is @@ -75,14 +63,10 @@ gitCredentialsList <- function(client) { return(json$credentials) } - #' Update a credential. #' #' Updates the specified Git credential. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param credential_id Required. The ID for the corresponding credential to access. #' @param git_provider Git provider. @@ -98,3 +82,7 @@ gitCredentialsUpdate <- function(client, credential_id, git_provider = NULL, git body = body) } + + + + diff --git a/R/global_init_scripts.R b/R/global_init_scripts.R index 087a6cc6..f2489ebb 100755 --- a/R/global_init_scripts.R +++ b/R/global_init_scripts.R @@ -7,9 +7,6 @@ NULL #' #' Creates a new global init script in this workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param enabled Specifies whether the script is enabled. #' @param name Required. The name of the script. @@ -22,14 +19,10 @@ globalInitScriptsCreate <- function(client, name, script, enabled = NULL, positi body <- list(enabled = enabled, name = name, position = position, script = script) client$do("POST", "/api/2.0/global-init-scripts", body = body) } - #' Delete init script. #' #' Deletes a global init script. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param script_id Required. The ID of the global init script. #' @@ -39,14 +32,10 @@ globalInitScriptsDelete <- function(client, script_id) { client$do("DELETE", paste("/api/2.0/global-init-scripts/", script_id, sep = "")) } - #' Get an init script. #' #' Gets all the details of a script, including its Base64-encoded contents. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param script_id Required. The ID of the global init script. #' @@ -56,7 +45,6 @@ globalInitScriptsGet <- function(client, script_id) { client$do("GET", paste("/api/2.0/global-init-scripts/", script_id, sep = "")) } - #' Get init scripts. #' #' Get a list of all global init scripts for this workspace. This returns all @@ -75,15 +63,11 @@ globalInitScriptsList <- function(client) { return(json$scripts) } - #' Update init script. #' #' Updates a global init script, specifying only the fields to change. All #' fields are optional. Unspecified fields retain their current value. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param enabled Specifies whether the script is enabled. #' @param name Required. The name of the script. @@ -100,3 +84,7 @@ globalInitScriptsUpdate <- function(client, script_id, name, script, enabled = N body = body) } + + + + diff --git a/R/grants.R b/R/grants.R index df2a53b2..c2a9f4d4 100755 --- a/R/grants.R +++ b/R/grants.R @@ -7,9 +7,6 @@ NULL #' #' Gets the permissions for a securable. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of securable. #' @param principal If provided, only the permissions for the specified principal (user or group) are returned. @@ -22,14 +19,10 @@ grantsGet <- function(client, securable_type, full_name, principal = NULL) { client$do("GET", paste("/api/2.1/unity-catalog/permissions/", securable_type, "/", full_name, sep = ""), query = query) } - #' Get effective permissions. #' #' Gets the effective permissions for a securable. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of securable. #' @param principal If provided, only the effective permissions for the specified principal (user or group) are returned. 
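A minimal usage sketch for the grants wrappers in this file, assuming the Unity Catalog permissions-change shape of `principal`/`add`/`remove`; the securable and principal names are invented:

grants <- grantsGetEffective(client, "table", "main.default.trips")
grantsUpdate(client, "table", "main.default.trips",
  changes = list(list(principal = "data-readers", add = list("SELECT"))))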
@@ -42,14 +35,10 @@ grantsGetEffective <- function(client, securable_type, full_name, principal = NU client$do("GET", paste("/api/2.1/unity-catalog/effective-permissions/", securable_type, "/", full_name, sep = ""), query = query) } - #' Update permissions. #' #' Updates the permissions for a securable. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param changes Array of permissions change objects. #' @param full_name Required. Full name of securable. @@ -63,3 +52,5 @@ grantsUpdate <- function(client, securable_type, full_name, changes = NULL) { "/", full_name, sep = ""), body = body) } + + diff --git a/R/groups.R b/R/groups.R index f82e680f..aa7153fd 100755 --- a/R/groups.R +++ b/R/groups.R @@ -8,8 +8,6 @@ NULL #' Creates a group in the Databricks workspace with a unique name, using the #' supplied group details. #' @param client Required. Instance of DatabricksClient() - - #' #' @param display_name String that represents a human-readable group name. #' @param entitlements Entitlements assigned to the group. @@ -30,14 +28,10 @@ groupsCreate <- function(client, display_name = NULL, entitlements = NULL, exter schemas = schemas) client$do("POST", "/api/2.0/preview/scim/v2/Groups", body = body) } - #' Delete a group. #' #' Deletes a group from the Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a group in the Databricks workspace. #' @@ -47,14 +41,10 @@ groupsDelete <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = "")) } - #' Get group details. #' #' Gets the information for a specific group in the Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a group in the Databricks workspace. #' @@ -64,13 +54,10 @@ groupsGet <- function(client, id) { client$do("GET", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = "")) } - #' List group details. #' #' Gets all details of the groups associated with the Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - #' #' @param attributes Comma-separated list of attributes to return in response. #' @param count Desired number of results per page. @@ -105,14 +92,10 @@ groupsList <- function(client, attributes = NULL, count = NULL, excluded_attribu return(results) } - #' Update group details. #' #' Partially updates the details of a group. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a group in the Databricks workspace. #' @param operations This field has no description yet. @@ -124,14 +107,10 @@ groupsPatch <- function(client, id, operations = NULL, schemas = NULL) { body <- list(Operations = operations, schemas = schemas) client$do("PATCH", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = ""), body = body) } - #' Replace a group. #' #' Updates the details of a group by replacing the entire group entity. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param display_name String that represents a human-readable group name. #' @param entitlements Entitlements assigned to the group. 
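A minimal usage sketch for groupsPatch() above, assuming the standard SCIM 2.0 `op`/`path`/`value` operation shape and PatchOp schema URN; the group id and values are invented:

groupsPatch(client, id = "123",
  operations = list(list(op = "replace", path = "displayName", value = "Data Readers")),
  schemas = list("urn:ietf:params:scim:api:messages:2.0:PatchOp"))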
@@ -153,3 +132,8 @@ groupsUpdate <- function(client, id, display_name = NULL, entitlements = NULL, e client$do("PUT", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = ""), body = body) } + + + + + diff --git a/R/instance_pools.R b/R/instance_pools.R index 2401c050..3faa0af1 100755 --- a/R/instance_pools.R +++ b/R/instance_pools.R @@ -7,9 +7,6 @@ NULL #' #' Creates a new instance pool using idle and ready-to-use cloud instances. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param aws_attributes Attributes related to instance pools running on Amazon Web Services. #' @param azure_attributes Attributes related to instance pools running on Azure. @@ -39,15 +36,11 @@ instancePoolsCreate <- function(client, instance_pool_name, node_type_id, aws_at preloaded_spark_versions = preloaded_spark_versions) client$do("POST", "/api/2.0/instance-pools/create", body = body) } - #' Delete an instance pool. #' #' Deletes the instance pool permanently. The idle instances in the pool are #' terminated asynchronously. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param instance_pool_id Required. The instance pool to be terminated. #' @@ -57,14 +50,10 @@ instancePoolsDelete <- function(client, instance_pool_id) { body <- list(instance_pool_id = instance_pool_id) client$do("POST", "/api/2.0/instance-pools/delete", body = body) } - #' Edit an existing instance pool. #' #' Modifies the configuration of an existing instance pool. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param custom_tags Additional tags for pool resources. #' @param idle_instance_autotermination_minutes Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. @@ -84,14 +73,10 @@ instancePoolsEdit <- function(client, instance_pool_id, instance_pool_name, node max_capacity = max_capacity, min_idle_instances = min_idle_instances, node_type_id = node_type_id) client$do("POST", "/api/2.0/instance-pools/edit", body = body) } - #' Get instance pool information. #' #' Retrieve the information for an instance pool based on its identifier. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param instance_pool_id Required. The canonical unique identifier for the instance pool. #' @@ -101,14 +86,10 @@ instancePoolsGet <- function(client, instance_pool_id) { query <- list(instance_pool_id = instance_pool_id) client$do("GET", "/api/2.0/instance-pools/get", query = query) } - #' Get instance pool permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. #' @@ -119,15 +100,11 @@ instancePoolsGetPermissionLevels <- function(client, instance_pool_id) { client$do("GET", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, "/permissionLevels", , sep = "")) } - #' Get instance pool permissions. #' #' Gets the permissions of an instance pool. Instance pools can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. 
#' @@ -138,7 +115,6 @@ instancePoolsGetPermissions <- function(client, instance_pool_id) { client$do("GET", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, sep = "")) } - #' List instance pool info. #' #' Gets a list of instance pools with their statistics. @@ -154,15 +130,11 @@ instancePoolsList <- function(client) { return(json$instance_pools) } - #' Set instance pool permissions. #' #' Sets permissions on an instance pool. Instance pools can inherit permissions #' from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. @@ -174,15 +146,11 @@ instancePoolsSetPermissions <- function(client, instance_pool_id, access_control client$do("PUT", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, sep = ""), body = body) } - #' Update instance pool permissions. #' #' Updates the permissions on an instance pool. Instance pools can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. @@ -195,3 +163,11 @@ instancePoolsUpdatePermissions <- function(client, instance_pool_id, access_cont sep = ""), body = body) } + + + + + + + + diff --git a/R/instance_profiles.R b/R/instance_profiles.R index ef905b30..b4b2c744 100755 --- a/R/instance_profiles.R +++ b/R/instance_profiles.R @@ -8,9 +8,6 @@ NULL #' In the UI, you can select the instance profile when launching clusters. This #' API is only available to admin users. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param iam_role_arn The AWS IAM role ARN of the role associated with the instance profile. #' @param instance_profile_arn Required. The AWS ARN of the instance profile to register with Databricks. @@ -25,7 +22,6 @@ instanceProfilesAdd <- function(client, instance_profile_arn, iam_role_arn = NUL is_meta_instance_profile = is_meta_instance_profile, skip_validation = skip_validation) client$do("POST", "/api/2.0/instance-profiles/add", body = body) } - #' Edit an instance profile. #' #' The only supported field to change is the optional IAM role ARN associated @@ -44,9 +40,6 @@ instanceProfilesAdd <- function(client, instance_profile_arn, iam_role_arn = NUL #' [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html #' [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param iam_role_arn The AWS IAM role ARN of the role associated with the instance profile. #' @param instance_profile_arn Required. The AWS ARN of the instance profile to register with Databricks. @@ -60,7 +53,6 @@ instanceProfilesEdit <- function(client, instance_profile_arn, iam_role_arn = NU is_meta_instance_profile = is_meta_instance_profile) client$do("POST", "/api/2.0/instance-profiles/edit", body = body) } - #' List available instance profiles. #' #' List the instance profiles that the calling user can use to launch a cluster. @@ -78,7 +70,6 @@ instanceProfilesList <- function(client) { return(json$instance_profiles) } - #' Remove the instance profile. #' #' Remove the instance profile with the provided ARN. 
Existing clusters with @@ -86,9 +77,6 @@ instanceProfilesList <- function(client) { #' #' This API is only accessible to admin users. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param instance_profile_arn Required. The ARN of the instance profile to remove. #' @@ -99,3 +87,6 @@ instanceProfilesRemove <- function(client, instance_profile_arn) { client$do("POST", "/api/2.0/instance-profiles/remove", body = body) } + + + diff --git a/R/ip_access_lists.R b/R/ip_access_lists.R index f710ccfd..1b40a4f2 100755 --- a/R/ip_access_lists.R +++ b/R/ip_access_lists.R @@ -22,9 +22,6 @@ NULL #' IP access list has no effect until you enable the feature. See #' :method:workspaceconf/setStatus #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param ip_addresses This field has no description yet. #' @param label Required. Label for the IP access list. @@ -36,14 +33,10 @@ ipAccessListsCreate <- function(client, label, list_type, ip_addresses = NULL) { body <- list(ip_addresses = ip_addresses, label = label, list_type = list_type) client$do("POST", "/api/2.0/ip-access-lists", body = body) } - #' Delete access list. #' #' Deletes an IP access list, specified by its list ID. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param ip_access_list_id Required. The ID for the corresponding IP access list. #' @@ -53,14 +46,10 @@ ipAccessListsDelete <- function(client, ip_access_list_id) { client$do("DELETE", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = "")) } - #' Get access list. #' #' Gets an IP access list, specified by its list ID. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param ip_access_list_id Required. The ID for the corresponding IP access list. #' @@ -70,7 +59,6 @@ ipAccessListsGet <- function(client, ip_access_list_id) { client$do("GET", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = "")) } - #' Get access lists. #' #' Gets all IP access lists for the specified workspace. @@ -86,7 +74,6 @@ ipAccessListsList <- function(client) { return(json$ip_access_lists) } - #' Replace access list. #' #' Replaces an IP access list, specified by its ID. @@ -102,9 +89,6 @@ ipAccessListsList <- function(client) { #' Note that your resulting IP access list has no effect until you enable the #' feature. See :method:workspaceconf/setStatus. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param enabled Required. Specifies whether this IP access list is enabled. #' @param ip_access_list_id Required. The ID for the corresponding IP access list. @@ -120,7 +104,6 @@ ipAccessListsReplace <- function(client, ip_access_list_id, label, list_type, en client$do("PUT", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = ""), body = body) } - #' Update access list. #' #' Updates an existing IP access list, specified by its ID. @@ -141,9 +124,6 @@ ipAccessListsReplace <- function(client, ip_access_list_id, label, list_type, en #' resulting IP access list has no effect until you enable the feature. See #' :method:workspaceconf/setStatus. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param enabled Specifies whether this IP access list is enabled. #' @param ip_access_list_id Required. The ID for the corresponding IP access list. 
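A minimal usage sketch for ipAccessListsCreate() above, assuming `list_type` accepts "ALLOW" as in the REST API; the label and CIDR block are invented:

ipAccessListsCreate(client, label = "office", list_type = "ALLOW",
  ip_addresses = list("192.168.100.0/22"))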
@@ -160,3 +140,8 @@ ipAccessListsUpdate <- function(client, ip_access_list_id, enabled = NULL, ip_ad body = body) } + + + + + diff --git a/R/jobs.R b/R/jobs.R index 3303f872..1bac8a6d 100755 --- a/R/jobs.R +++ b/R/jobs.R @@ -8,8 +8,6 @@ NULL #' Cancels all active runs of a job. The runs are canceled asynchronously, so it #' doesn't prevent new runs from being started. #' @param client Required. Instance of DatabricksClient() - - #' #' @param all_queued_runs Optional boolean parameter to cancel all queued runs. #' @param job_id The canonical identifier of the job to cancel all runs of. @@ -20,77 +18,24 @@ jobsCancelAllRuns <- function(client, all_queued_runs = NULL, job_id = NULL) { body <- list(all_queued_runs = all_queued_runs, job_id = job_id) client$do("POST", "/api/2.1/jobs/runs/cancel-all", body = body) } - #' Cancel a run. #' #' Cancels a job run or a task run. The run is canceled asynchronously, so it #' may still be running when this request completes. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Jobs on Databricks reach -#' TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Jobs is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param run_id Required. This field is required. #' #' @rdname jobsCancelRun #' @export -jobsCancelRun <- function(client, run_id, timeout = 20, callback = cli_reporter) { +jobsCancelRun <- function(client, run_id) { body <- list(run_id = run_id) - op_response <- client$do("POST", "/api/2.1/jobs/runs/cancel", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("TERMINATED", "SKIPPED", c()) - failure_states <- c("INTERNAL_ERROR", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- jobsGetRun(client, run_id = run_id) - status <- poll$state$life_cycle_state - status_message <- paste("current status:", status) - if (!is.null(poll$state)) { - status_message <- poll$state$state_message - } - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach TERMINATED or SKIPPED, got ", status, "-", - status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::jobsGetRun(run_id=", run_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.1/jobs/runs/cancel", body = body) } - #' Create a new job. #' #' Create a new job. #' @param client Required. Instance of DatabricksClient() - - #' #' @param access_control_list List of permissions to set on the job. #' @param compute A list of compute requirements that can be referenced by tasks of this job. 
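A migration sketch for the breaking change above; the run id is invented. jobsCancelRun() now only issues the cancel request, while the blocking behavior moves to the `AndWait` variant defined later in this patch:

jobsCancelRun(client, run_id = 42)  # returns as soon as the cancel request is accepted
jobsCancelRunAndWait(client, run_id = 42)  # blocks until TERMINATED or SKIPPED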
@@ -133,14 +78,10 @@ jobsCreate <- function(client, access_control_list = NULL, compute = NULL, conti timeout_seconds = timeout_seconds, trigger = trigger, webhook_notifications = webhook_notifications) client$do("POST", "/api/2.1/jobs/create", body = body) } - #' Delete a job. #' #' Deletes a job. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param job_id Required. The canonical identifier of the job to delete. #' @@ -150,14 +91,10 @@ jobsDelete <- function(client, job_id) { body <- list(job_id = job_id) client$do("POST", "/api/2.1/jobs/delete", body = body) } - #' Delete a job run. #' #' Deletes a non-active run. Returns an error if the run is active. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param run_id Required. The canonical identifier of the run for which to retrieve the metadata. #' @@ -167,14 +104,10 @@ jobsDeleteRun <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.1/jobs/runs/delete", body = body) } - #' Export and retrieve a job run. #' #' Export and retrieve the job run task. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param run_id Required. The canonical identifier for the run. #' @param views_to_export Which views to export (CODE, DASHBOARDS, or ALL). @@ -185,14 +118,10 @@ jobsExportRun <- function(client, run_id, views_to_export = NULL) { query <- list(run_id = run_id, views_to_export = views_to_export) client$do("GET", "/api/2.1/jobs/runs/export", query = query) } - #' Get a single job. #' #' Retrieves the details for a single job. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param job_id Required. The canonical identifier of the job to retrieve information about. #' @@ -202,14 +131,10 @@ jobsGet <- function(client, job_id) { query <- list(job_id = job_id) client$do("GET", "/api/2.1/jobs/get", query = query) } - #' Get job permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param job_id Required. The job for which to get or manage permissions. #' @@ -220,15 +145,11 @@ jobsGetPermissionLevels <- function(client, job_id) { client$do("GET", paste("/api/2.0/permissions/jobs/", job_id, "/permissionLevels", , sep = "")) } - #' Get job permissions. #' #' Gets the permissions of a job. Jobs can inherit permissions from their root #' object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param job_id Required. The job for which to get or manage permissions. #' @@ -238,22 +159,10 @@ jobsGetPermissions <- function(client, job_id) { client$do("GET", paste("/api/2.0/permissions/jobs/", job_id, sep = "")) } - #' Get a single job run. #' #' Retrieve the metadata of a run. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Jobs on Databricks reach -#' TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Jobs is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param include_history Whether to include the repair history in the response. 
#' @param include_resolved_values Whether to include resolved parameter values in the response. @@ -261,51 +170,11 @@ jobsGetPermissions <- function(client, job_id) { #' #' @rdname jobsGetRun #' @export -jobsGetRun <- function(client, run_id, include_history = NULL, include_resolved_values = NULL, - timeout = 20, callback = cli_reporter) { +jobsGetRun <- function(client, run_id, include_history = NULL, include_resolved_values = NULL) { query <- list(include_history = include_history, include_resolved_values = include_resolved_values, run_id = run_id) - op_response <- client$do("GET", "/api/2.1/jobs/runs/get", query = query) - started <- as.numeric(Sys.time()) - target_states <- c("TERMINATED", "SKIPPED", c()) - failure_states <- c("INTERNAL_ERROR", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- jobsGetRun(client, run_id = op_response$run_id) - status <- poll$state$life_cycle_state - status_message <- paste("current status:", status) - if (!is.null(poll$state)) { - status_message <- poll$state$state_message - } - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach TERMINATED or SKIPPED, got ", status, "-", - status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::jobsGetRun(run_id=", op_response$run_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("GET", "/api/2.1/jobs/runs/get", query = query) } - #' Get the output for a single run. #' #' Retrieve the output and metadata of a single task run. When a notebook task @@ -319,9 +188,6 @@ jobsGetRun <- function(client, run_id, include_history = NULL, include_resolved_ #' automatically removed after 60 days. If you want to reference them beyond #' 60 days, you must save old run results before they expire. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param run_id Required. The canonical identifier for the run. #' @@ -331,13 +197,10 @@ jobsGetRunOutput <- function(client, run_id) { query <- list(run_id = run_id) client$do("GET", "/api/2.1/jobs/runs/get-output", query = query) } - #' List jobs. #' #' Retrieves a list of jobs. #' @param client Required. Instance of DatabricksClient() - - #' #' @param expand_tasks Whether to include task and cluster details in the response. #' @param limit The number of jobs to return. @@ -370,13 +233,10 @@ jobsList <- function(client, expand_tasks = NULL, limit = NULL, name = NULL, off return(results) } - #' List job runs. #' #' List runs in descending order by start time. #' @param client Required. Instance of DatabricksClient() - - #' #' @param active_only If active_only is `true`, only active runs are included in the results; otherwise, lists both active and completed runs. #' @param completed_only If completed_only is `true`, only completed runs are included in the results; otherwise, lists both active and completed runs.
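A usage sketch for the paginated list wrappers; the argument values are invented. These helpers page through the API and return a single data.frame covering every response page, as the `return(results)` bodies here show:

runs <- jobsListRuns(client, active_only = TRUE, expand_tasks = TRUE)
nrow(runs)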
@@ -416,13 +276,172 @@ jobsListRuns <- function(client, active_only = NULL, completed_only = NULL, expa return(results) } - #' Repair a job run. #' #' Re-run one or more tasks. Tasks are re-run as part of the original job run. #' They use the current job and task settings, and can be viewed in the history #' for the original job run. #' @param client Required. Instance of DatabricksClient() +#' +#' @param dbt_commands An array of commands to execute for jobs with the dbt task, for example `'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']`. +#' @param jar_params A list of parameters for jobs with Spark JAR tasks, for example `'jar_params': ['john doe', '35']`. +#' @param job_parameters Job-level parameters used in the run. +#' @param latest_repair_id The ID of the latest repair. +#' @param notebook_params A map from keys to values for jobs with notebook task, for example `'notebook_params': {'name': 'john doe', 'age': '35'}`. +#' @param pipeline_params This field has no description yet. +#' @param python_named_params A map from keys to values for jobs with Python wheel task, for example `'python_named_params': {'name': 'task', 'data': 'dbfs:/path/to/data.json'}`. +#' @param python_params A list of parameters for jobs with Python tasks, for example `'python_params': ['john doe', '35']`. +#' @param rerun_all_failed_tasks If true, repair all failed tasks. +#' @param rerun_dependent_tasks If true, repair all tasks that depend on the tasks in `rerun_tasks`, even if they were previously successful. +#' @param rerun_tasks The task keys of the task runs to repair. +#' @param run_id Required. The job run ID of the run to repair. +#' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`. +#' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`. +#' +#' @rdname jobsRepairRun +#' @export +jobsRepairRun <- function(client, run_id, dbt_commands = NULL, jar_params = NULL, + job_parameters = NULL, latest_repair_id = NULL, notebook_params = NULL, pipeline_params = NULL, + python_named_params = NULL, python_params = NULL, rerun_all_failed_tasks = NULL, + rerun_dependent_tasks = NULL, rerun_tasks = NULL, spark_submit_params = NULL, + sql_params = NULL) { + body <- list(dbt_commands = dbt_commands, jar_params = jar_params, job_parameters = job_parameters, + latest_repair_id = latest_repair_id, notebook_params = notebook_params, pipeline_params = pipeline_params, + python_named_params = python_named_params, python_params = python_params, + rerun_all_failed_tasks = rerun_all_failed_tasks, rerun_dependent_tasks = rerun_dependent_tasks, + rerun_tasks = rerun_tasks, run_id = run_id, spark_submit_params = spark_submit_params, + sql_params = sql_params) + client$do("POST", "/api/2.1/jobs/runs/repair", body = body) +} +#' Update all job settings (reset). +#' +#' Overwrite all settings for the given job. Use the [_Update_ +#' endpoint](:method:jobs/update) to update job settings partially. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param job_id Required. The canonical identifier of the job to reset. +#' @param new_settings Required. The new settings of the job. +#' +#' @rdname jobsReset +#' @export +jobsReset <- function(client, job_id, new_settings) { + body <- list(job_id = job_id, new_settings = new_settings) + client$do("POST", "/api/2.1/jobs/reset", body = body) +} +#' Trigger a new job run. 
+#' +#' Run a job and return the `run_id` of the triggered run. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param dbt_commands An array of commands to execute for jobs with the dbt task, for example `'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']`. +#' @param idempotency_token An optional token to guarantee the idempotency of job run requests. +#' @param jar_params A list of parameters for jobs with Spark JAR tasks, for example `'jar_params': ['john doe', '35']`. +#' @param job_id Required. The ID of the job to be executed. +#' @param job_parameters Job-level parameters used in the run. +#' @param notebook_params A map from keys to values for jobs with notebook task, for example `'notebook_params': {'name': 'john doe', 'age': '35'}`. +#' @param pipeline_params This field has no description yet. +#' @param python_named_params A map from keys to values for jobs with Python wheel task, for example `'python_named_params': {'name': 'task', 'data': 'dbfs:/path/to/data.json'}`. +#' @param python_params A list of parameters for jobs with Python tasks, for example `'python_params': ['john doe', '35']`. +#' @param queue The queue settings of the run. +#' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`. +#' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`. +#' +#' @rdname jobsRunNow +#' @export +jobsRunNow <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL, + jar_params = NULL, job_parameters = NULL, notebook_params = NULL, pipeline_params = NULL, + python_named_params = NULL, python_params = NULL, queue = NULL, spark_submit_params = NULL, + sql_params = NULL) { + body <- list(dbt_commands = dbt_commands, idempotency_token = idempotency_token, + jar_params = jar_params, job_id = job_id, job_parameters = job_parameters, + notebook_params = notebook_params, pipeline_params = pipeline_params, python_named_params = python_named_params, + python_params = python_params, queue = queue, spark_submit_params = spark_submit_params, + sql_params = sql_params) + client$do("POST", "/api/2.1/jobs/run-now", body = body) +} +#' Set job permissions. +#' +#' Sets permissions on a job. Jobs can inherit permissions from their root +#' object. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param access_control_list This field has no description yet. +#' @param job_id Required. The job for which to get or manage permissions. +#' +#' @rdname jobsSetPermissions +#' @export +jobsSetPermissions <- function(client, job_id, access_control_list = NULL) { + body <- list(access_control_list = access_control_list) + client$do("PUT", paste("/api/2.0/permissions/jobs/", job_id, sep = ""), body = body) +} +#' Create and trigger a one-time run. +#' +#' Submit a one-time run. This endpoint allows you to submit a workload directly +#' without creating a job. Runs submitted using this endpoint don’t display in +#' the UI. Use the `jobs/runs/get` API to check the run state after the job is +#' submitted. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param access_control_list List of permissions to set on the job. +#' @param email_notifications An optional set of email addresses notified when the run begins or completes. +#' @param git_source An optional specification for a remote Git repository containing the source code used by tasks. 
+#' @param health An optional set of health rules that can be defined for this job.
+#' @param idempotency_token An optional token that can be used to guarantee the idempotency of job run requests.
+#' @param notification_settings Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this run.
+#' @param queue The queue settings of the one-time run.
+#' @param run_name An optional name for the run.
+#' @param tasks This field has no description yet.
+#' @param timeout_seconds An optional timeout applied to each run of this job.
+#' @param webhook_notifications A collection of system notification IDs to notify when the run begins or completes.
+#'
+#' @rdname jobsSubmit
+#' @export
+jobsSubmit <- function(client, access_control_list = NULL, email_notifications = NULL,
+  git_source = NULL, health = NULL, idempotency_token = NULL, notification_settings = NULL,
+  queue = NULL, run_name = NULL, tasks = NULL, timeout_seconds = NULL, webhook_notifications = NULL) {
+  body <- list(access_control_list = access_control_list, email_notifications = email_notifications,
+    git_source = git_source, health = health, idempotency_token = idempotency_token,
+    notification_settings = notification_settings, queue = queue, run_name = run_name,
+    tasks = tasks, timeout_seconds = timeout_seconds, webhook_notifications = webhook_notifications)
+  client$do("POST", "/api/2.1/jobs/runs/submit", body = body)
+}
+#' Update job settings partially.
+#'
+#' Add, update, or remove specific settings of an existing job. Use the [_Reset_
+#' endpoint](:method:jobs/reset) to overwrite all job settings.
+#' @param client Required. Instance of DatabricksClient()
+#'
+#' @param fields_to_remove Remove top-level fields in the job settings.
+#' @param job_id Required. The canonical identifier of the job to update.
+#' @param new_settings The new settings for the job.
+#'
+#' @rdname jobsUpdate
+#' @export
+jobsUpdate <- function(client, job_id, fields_to_remove = NULL, new_settings = NULL) {
+  body <- list(fields_to_remove = fields_to_remove, job_id = job_id, new_settings = new_settings)
+  client$do("POST", "/api/2.1/jobs/update", body = body)
+}
+#' Update job permissions.
+#'
+#' Updates the permissions on a job. Jobs can inherit permissions from their
+#' root object.
+#' @param client Required. Instance of DatabricksClient()
+#'
+#' @param access_control_list This field has no description yet.
+#' @param job_id Required. The job for which to get or manage permissions.
+#'
+#' @rdname jobsUpdatePermissions
+#' @export
+jobsUpdatePermissions <- function(client, job_id, access_control_list = NULL) {
+  body <- list(access_control_list = access_control_list)
+  client$do("PATCH", paste("/api/2.0/permissions/jobs/", job_id, sep = ""), body = body)
+}
+
+#' Cancel a run.
+#'
+#' Cancels a job run or a task run. The run is canceled asynchronously, so it
+#' may still be running when this request completes.
+#' @param client Required. Instance of DatabricksClient()
 #'
 #' @description
@@ -432,8 +451,144 @@ jobsListRuns <- function(client, active_only = NULL, completed_only = NULL, expa
 #' by changing the `callback` parameter.
 #' @param timeout Time to wait for the operation to complete in minutes.
 #' @param callback Function to report the status of the operation. By default, it reports to console.
+#'
+#' @param run_id Required. The canonical identifier of the run to cancel.
+#'
+#' @rdname jobsCancelRunAndWait
+#' @export
+jobsCancelRunAndWait <- function(client, run_id, timeout = 20, callback = cli_reporter) {
+  body <- list(run_id = run_id)
+  op_response <- client$do("POST", "/api/2.1/jobs/runs/cancel", body = body)
+  started <- as.numeric(Sys.time())
+  target_states <- c("TERMINATED", "SKIPPED")
+  failure_states <- c("INTERNAL_ERROR")
+  status_message <- "polling..."
+  attempt <- 1
+  while ((started + (timeout * 60)) > as.numeric(Sys.time())) {
+    poll <- jobsGetRun(client, run_id = run_id)
+    status <- poll$state$life_cycle_state
+    status_message <- paste("current status:", status)
+    if (!is.null(poll$state)) {
+      status_message <- poll$state$state_message
+    }
+    if (status %in% target_states) {
+      if (!is.null(callback)) {
+        callback(paste0(status, ": ", status_message), done = TRUE)
+      }
+      return(poll)
+    }
+    if (status %in% failure_states) {
+      msg <- paste("failed to reach TERMINATED or SKIPPED, got", status, "-",
+        status_message)
+      rlang::abort(msg, call = rlang::caller_env())
+    }
+    prefix <- paste0("databricks::jobsGetRun(run_id=", run_id, ")")
+    sleep <- attempt
+    if (sleep > 10) {
+      # sleep 10s max per attempt
+      sleep <- 10
+    }
+    if (!is.null(callback)) {
+      callback(paste0(status, ": ", status_message), done = FALSE)
+    }
+    random_pause <- runif(1, min = 0.1, max = 0.5)
+    Sys.sleep(sleep + random_pause)
+    attempt <- attempt + 1
+  }
+  msg <- paste("timed out after", timeout, "minutes:", status_message)
+  rlang::abort(msg, call = rlang::caller_env())
+}
+
+
+
+
+
+
+
+
+#' Get a single job run.
+#'
+#' Retrieve the metadata of a run.
+#' @param client Required. Instance of DatabricksClient()
 #'
+#' @description
+#' This is a long-running operation, which blocks until Jobs on Databricks reach the
+#' TERMINATED or SKIPPED state, with a timeout of 20 minutes that you can change via the `timeout` parameter.
+#' By default, the state of Databricks Jobs is reported to console. You can change this behavior
+#' by changing the `callback` parameter.
+#' @param timeout Time to wait for the operation to complete in minutes.
+#' @param callback Function to report the status of the operation. By default, it reports to console.
+#'
+#' @param include_history Whether to include the repair history in the response.
+#' @param include_resolved_values Whether to include resolved parameter values in the response.
+#' @param run_id Required. The canonical identifier of the run for which to retrieve the metadata.
+#'
+#' @rdname jobsGetRunAndWait
+#' @export
+jobsGetRunAndWait <- function(client, run_id, include_history = NULL, include_resolved_values = NULL,
+  timeout = 20, callback = cli_reporter) {
+  query <- list(include_history = include_history, include_resolved_values = include_resolved_values,
+    run_id = run_id)
+  op_response <- client$do("GET", "/api/2.1/jobs/runs/get", query = query)
+  started <- as.numeric(Sys.time())
+  target_states <- c("TERMINATED", "SKIPPED")
+  failure_states <- c("INTERNAL_ERROR")
+  status_message <- "polling..."
+  attempt <- 1
+  while ((started + (timeout * 60)) > as.numeric(Sys.time())) {
+    poll <- jobsGetRun(client, run_id = op_response$run_id)
+    status <- poll$state$life_cycle_state
+    status_message <- paste("current status:", status)
+    if (!is.null(poll$state)) {
+      status_message <- poll$state$state_message
+    }
+    if (status %in% target_states) {
+      if (!is.null(callback)) {
+        callback(paste0(status, ": ", status_message), done = TRUE)
+      }
+      return(poll)
+    }
+    if (status %in% failure_states) {
+      msg <- paste("failed to reach TERMINATED or SKIPPED, got", status, "-",
+        status_message)
+      rlang::abort(msg, call = rlang::caller_env())
+    }
+    prefix <- paste0("databricks::jobsGetRun(run_id=", op_response$run_id, ")")
+    sleep <- attempt
+    if (sleep > 10) {
+      # sleep 10s max per attempt
+      sleep <- 10
+    }
+    if (!is.null(callback)) {
+      callback(paste0(status, ": ", status_message), done = FALSE)
+    }
+    random_pause <- runif(1, min = 0.1, max = 0.5)
+    Sys.sleep(sleep + random_pause)
+    attempt <- attempt + 1
+  }
+  msg <- paste("timed out after", timeout, "minutes:", status_message)
+  rlang::abort(msg, call = rlang::caller_env())
+}
+
+
+
+
+#' Repair a job run.
+#'
+#' Re-run one or more tasks. Tasks are re-run as part of the original job run.
+#' They use the current job and task settings, and can be viewed in the history
+#' for the original job run.
+#' @param client Required. Instance of DatabricksClient()
+#'
+#'
+#' @description
+#' This is a long-running operation, which blocks until Jobs on Databricks reach the
+#' TERMINATED or SKIPPED state, with a timeout of 20 minutes that you can change via the `timeout` parameter.
+#' By default, the state of Databricks Jobs is reported to console. You can change this behavior
+#' by changing the `callback` parameter.
+#' @param timeout Time to wait for the operation to complete in minutes.
+#' @param callback Function to report the status of the operation. By default, it reports to console.
 #'
 #' @param dbt_commands An array of commands to execute for jobs with the dbt task, for example `'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']`.
 #' @param jar_params A list of parameters for jobs with Spark JAR tasks, for example `'jar_params': ['john doe', '35']`.
@@ -450,9 +605,9 @@ jobsListRuns <- function(client, active_only = NULL, completed_only = NULL, expa
 #' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`.
 #' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`.
 #'
-#' @rdname jobsRepairRun
+#' @rdname jobsRepairRunAndWait
 #' @export
-jobsRepairRun <- function(client, run_id, dbt_commands = NULL, jar_params = NULL,
+jobsRepairRunAndWait <- function(client, run_id, dbt_commands = NULL, jar_params = NULL,
   job_parameters = NULL, latest_repair_id = NULL, notebook_params = NULL, pipeline_params = NULL,
   python_named_params = NULL, python_params = NULL, rerun_all_failed_tasks = NULL,
   rerun_dependent_tasks = NULL, rerun_tasks = NULL, spark_submit_params = NULL,
@@ -504,24 +659,6 @@ jobsRepairRun <- function(client, run_id, dbt_commands = NULL, jar_params = NULL
   rlang::abort(msg, call = rlang::caller_env())
 }
-#' Update all job settings (reset).
-#'
-#' Overwrite all settings for the given job. Use the [_Update_
-#' endpoint](:method:jobs/update) to update job settings partially.
-#' @param client Required. Instance of DatabricksClient()
-
-
-#'
-#'
-#' @param job_id Required. The canonical identifier of the job to reset.
-#' @param new_settings Required. The new settings of the job.
-#'
-#' @rdname jobsReset
-#' @export
-jobsReset <- function(client, job_id, new_settings) {
-  body <- list(job_id = job_id, new_settings = new_settings)
-  client$do("POST", "/api/2.1/jobs/reset", body = body)
-}
 #' Trigger a new job run.
 #'
@@ -536,8 +673,6 @@ jobsReset <- function(client, job_id, new_settings) {
 #' by changing the `callback` parameter.
 #' @param timeout Time to wait for the operation to complete in minutes.
 #' @param callback Function to report the status of the operation. By default, it reports to console.
-
-#'
 #'
 #' @param dbt_commands An array of commands to execute for jobs with the dbt task, for example `'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']`.
 #' @param idempotency_token An optional token to guarantee the idempotency of job run requests.
@@ -552,9 +687,9 @@ jobsReset <- function(client, job_id, new_settings) {
 #' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`.
 #' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`.
 #'
-#' @rdname jobsRunNow
+#' @rdname jobsRunNowAndWait
 #' @export
-jobsRunNow <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL,
+jobsRunNowAndWait <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL,
   jar_params = NULL, job_parameters = NULL, notebook_params = NULL, pipeline_params = NULL,
   python_named_params = NULL, python_params = NULL, queue = NULL, spark_submit_params = NULL,
   sql_params = NULL, timeout = 20, callback = cli_reporter) {
@@ -604,24 +739,6 @@ jobsRunNow <- function(client, job_id, dbt_commands = NULL, idempotency_token =
   rlang::abort(msg, call = rlang::caller_env())
 }
-#' Set job permissions.
-#'
-#' Sets permissions on a job. Jobs can inherit permissions from their root
-#' object.
-#' @param client Required. Instance of DatabricksClient()
-
-
-#'
-#'
-#' @param access_control_list This field has no description yet.
-#' @param job_id Required. The job for which to get or manage permissions.
-#'
-#' @rdname jobsSetPermissions
-#' @export
-jobsSetPermissions <- function(client, job_id, access_control_list = NULL) {
-  body <- list(access_control_list = access_control_list)
-  client$do("PUT", paste("/api/2.0/permissions/jobs/", job_id, sep = ""), body = body)
-}
 #' Create and trigger a one-time run.
 #'
@@ -639,7 +756,6 @@ jobsSetPermissions <- function(client, job_id, access_control_list = NULL) {
 #' by changing the `callback` parameter.
 #' @param timeout Time to wait for the operation to complete in minutes.
 #' @param callback Function to report the status of the operation. By default, it reports to console.
-
 #'
 #' @param access_control_list List of permissions to set on the job.
 #' @param email_notifications An optional set of email addresses notified when the run begins or completes.
 #' @param git_source An optional specification for a remote Git repository containing the source code used by tasks.
@@ -653,9 +769,9 @@ jobsSetPermissions <- function(client, job_id, access_control_list = NULL) {
 #' @param timeout_seconds An optional timeout applied to each run of this job.
 #' @param webhook_notifications A collection of system notification IDs to notify when the run begins or completes.
#' -#' @rdname jobsSubmit +#' @rdname jobsSubmitAndWait #' @export -jobsSubmit <- function(client, access_control_list = NULL, email_notifications = NULL, +jobsSubmitAndWait <- function(client, access_control_list = NULL, email_notifications = NULL, git_source = NULL, health = NULL, idempotency_token = NULL, notification_settings = NULL, queue = NULL, run_name = NULL, tasks = NULL, timeout_seconds = NULL, webhook_notifications = NULL, timeout = 20, callback = cli_reporter) { @@ -704,42 +820,5 @@ jobsSubmit <- function(client, access_control_list = NULL, email_notifications = rlang::abort(msg, call = rlang::caller_env()) } -#' Update job settings partially. -#' -#' Add, update, or remove specific settings of an existing job. Use the [_Reset_ -#' endpoint](:method:jobs/reset) to overwrite all job settings. -#' @param client Required. Instance of DatabricksClient() - -#' -#' -#' @param fields_to_remove Remove top-level fields in the job settings. -#' @param job_id Required. The canonical identifier of the job to update. -#' @param new_settings The new settings for the job. -#' -#' @rdname jobsUpdate -#' @export -jobsUpdate <- function(client, job_id, fields_to_remove = NULL, new_settings = NULL) { - body <- list(fields_to_remove = fields_to_remove, job_id = job_id, new_settings = new_settings) - client$do("POST", "/api/2.1/jobs/update", body = body) -} - -#' Update job permissions. -#' -#' Updates the permissions on a job. Jobs can inherit permissions from their -#' root object. -#' @param client Required. Instance of DatabricksClient() - - -#' -#' -#' @param access_control_list This field has no description yet. -#' @param job_id Required. The job for which to get or manage permissions. -#' -#' @rdname jobsUpdatePermissions -#' @export -jobsUpdatePermissions <- function(client, job_id, access_control_list = NULL) { - body <- list(access_control_list = access_control_list) - client$do("PATCH", paste("/api/2.0/permissions/jobs/", job_id, sep = ""), body = body) -} diff --git a/R/lakehouse_monitors.R b/R/lakehouse_monitors.R index be7edc9e..6ddeb4c8 100755 --- a/R/lakehouse_monitors.R +++ b/R/lakehouse_monitors.R @@ -16,9 +16,6 @@ NULL #' Additionally, the call must be made from the workspace where the monitor was #' created. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @param refresh_id Required. ID of the refresh. @@ -30,7 +27,6 @@ lakehouseMonitorsCancelRefresh <- function(client, full_name, refresh_id) { client$do("POST", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes/", refresh_id, "/cancel", , sep = "")) } - #' Create a table monitor. #' #' Creates a new monitor for the specified table. @@ -46,9 +42,6 @@ lakehouseMonitorsCancelRefresh <- function(client, full_name, refresh_id) { #' Workspace assets, such as the dashboard, will be created in the workspace #' where this call was made. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param assets_dir Required. The directory to store monitoring assets (e.g. #' @param baseline_table_name Name of the baseline table from which drift metrics are computed from. @@ -79,7 +72,6 @@ lakehouseMonitorsCreate <- function(client, full_name, assets_dir, output_schema client$do("POST", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor", , sep = ""), body = body) } - #' Delete a table monitor. #' #' Deletes a monitor for the specified table. 
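With the hunks above, jobsSubmit keeps its fire-and-forget semantics while jobsSubmitAndWait blocks until the one-time run terminates. A minimal usage sketch, assuming a configured client; the run name, cluster ID, and notebook path are hypothetical, and the nested task list mirrors the Jobs API request shape:

client <- DatabricksClient()

# Blocks until the submitted run reaches TERMINATED or SKIPPED, polling
# jobsGetRun() with the capped-backoff loop shown in the hunks above.
run <- jobsSubmitAndWait(client, run_name = "one-time-example",
  tasks = list(list(task_key = "main",
    existing_cluster_id = "1234-567890-ab12cd34",  # hypothetical
    notebook_task = list(notebook_path = "/Shared/example-notebook"))),
  timeout = 30)
run$state$life_cycle_state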
@@ -96,9 +88,6 @@ lakehouseMonitorsCreate <- function(client, full_name, assets_dir, output_schema #' Note that the metric tables and dashboard will not be deleted as part of this #' call; those assets must be manually cleaned up (if desired). #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @@ -109,7 +98,6 @@ lakehouseMonitorsDelete <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor", , sep = "")) } - #' Get a table monitor. #' #' Gets a monitor for the specified table. @@ -125,9 +113,6 @@ lakehouseMonitorsDelete <- function(client, full_name) { #' dashboard) may be filtered out if the caller is in a different workspace than #' where the monitor was created. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @@ -138,7 +123,6 @@ lakehouseMonitorsGet <- function(client, full_name) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor", , sep = "")) } - #' Get refresh. #' #' Gets info about a specific monitor refresh using the given refresh ID. @@ -152,9 +136,6 @@ lakehouseMonitorsGet <- function(client, full_name) { #' Additionally, the call must be made from the workspace where the monitor was #' created. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @param refresh_id Required. ID of the refresh. @@ -166,7 +147,6 @@ lakehouseMonitorsGetRefresh <- function(client, full_name, refresh_id) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes/", refresh_id, sep = "")) } - #' List refreshes. #' #' Gets an array containing the history of the most recent refreshes (up to 25) @@ -181,9 +161,6 @@ lakehouseMonitorsGetRefresh <- function(client, full_name, refresh_id) { #' Additionally, the call must be made from the workspace where the monitor was #' created. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @@ -194,7 +171,6 @@ lakehouseMonitorsListRefreshes <- function(client, full_name) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes", , sep = "")) } - #' Queue a metric refresh for a monitor. #' #' Queues a metric refresh on the monitor for the specified table. The refresh @@ -209,9 +185,6 @@ lakehouseMonitorsListRefreshes <- function(client, full_name) { #' Additionally, the call must be made from the workspace where the monitor was #' created. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @@ -222,7 +195,6 @@ lakehouseMonitorsRunRefresh <- function(client, full_name) { client$do("POST", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes", , sep = "")) } - #' Update a table monitor. #' #' Updates a monitor for the specified table. @@ -239,9 +211,6 @@ lakehouseMonitorsRunRefresh <- function(client, full_name) { #' Certain configuration fields, such as output asset identifiers, cannot be #' updated. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param baseline_table_name Name of the baseline table from which drift metrics are computed from. #' @param custom_metrics Custom metrics to compute on the monitored table. 
@@ -269,3 +238,10 @@ lakehouseMonitorsUpdate <- function(client, full_name, output_schema_name, basel , sep = ""), body = body) } + + + + + + + diff --git a/R/lakeview.R b/R/lakeview.R index a2e8a439..abc7d544 100755 --- a/R/lakeview.R +++ b/R/lakeview.R @@ -7,9 +7,6 @@ NULL #' #' Create a draft dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param display_name Required. The display name of the dashboard. #' @param parent_path The workspace path of the folder containing the dashboard. @@ -24,14 +21,10 @@ lakeviewCreate <- function(client, display_name, parent_path = NULL, serialized_ warehouse_id = warehouse_id) client$do("POST", "/api/2.0/lakeview/dashboards", body = body) } - #' Get dashboard. #' #' Get a draft dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. UUID identifying the dashboard. #' @@ -41,14 +34,10 @@ lakeviewGet <- function(client, dashboard_id) { client$do("GET", paste("/api/2.0/lakeview/dashboards/", dashboard_id, sep = "")) } - #' Get published dashboard. #' #' Get the current published dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. UUID identifying the dashboard to be published. #' @@ -59,14 +48,10 @@ lakeviewGetPublished <- function(client, dashboard_id) { client$do("GET", paste("/api/2.0/lakeview/dashboards/", dashboard_id, "/published", , sep = "")) } - #' Publish dashboard. #' #' Publish the current draft dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. UUID identifying the dashboard to be published. #' @param embed_credentials Flag to indicate if the publisher's credentials should be embedded in the published dashboard. @@ -79,14 +64,10 @@ lakeviewPublish <- function(client, dashboard_id, embed_credentials = NULL, ware client$do("POST", paste("/api/2.0/lakeview/dashboards/", dashboard_id, "/published", , sep = ""), body = body) } - #' Trash dashboard. #' #' Trash a dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. UUID identifying the dashboard. #' @@ -96,14 +77,10 @@ lakeviewTrash <- function(client, dashboard_id) { client$do("DELETE", paste("/api/2.0/lakeview/dashboards/", dashboard_id, sep = "")) } - #' Update dashboard. #' #' Update a draft dashboard. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dashboard_id Required. UUID identifying the dashboard. #' @param display_name The display name of the dashboard. @@ -121,3 +98,8 @@ lakeviewUpdate <- function(client, dashboard_id, display_name = NULL, etag = NUL body = body) } + + + + + diff --git a/R/libraries.R b/R/libraries.R index b4efd7d7..2f2b50f6 100755 --- a/R/libraries.R +++ b/R/libraries.R @@ -16,7 +16,6 @@ NULL librariesAllClusterStatuses <- function(client) { client$do("GET", "/api/2.0/libraries/all-cluster-statuses") } - #' Get status. #' #' Get the status of libraries on a cluster. A status will be available for all @@ -35,9 +34,6 @@ librariesAllClusterStatuses <- function(client) { #' clusters, but now marked for removal. Within this group there is no order #' guarantee. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. Unique identifier of the cluster whose status should be retrieved. #' @@ -52,7 +48,6 @@ librariesClusterStatus <- function(client, cluster_id) { return(json$library_statuses) } - #' Add a library. 
#' #' Add libraries to be installed on a cluster. The installation is asynchronous; @@ -62,9 +57,6 @@ librariesClusterStatus <- function(client, cluster_id) { #' union of the libraries specified via this method and the libraries set to be #' installed on all clusters via the libraries UI. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. Unique identifier for the cluster on which to install these libraries. #' @param libraries Required. The libraries to install. @@ -75,16 +67,12 @@ librariesInstall <- function(client, cluster_id, libraries) { body <- list(cluster_id = cluster_id, libraries = libraries) client$do("POST", "/api/2.0/libraries/install", body = body) } - #' Uninstall libraries. #' #' Set libraries to be uninstalled on a cluster. The libraries won't be #' uninstalled until the cluster is restarted. Uninstalling libraries that are #' not installed on the cluster will have no impact but is not an error. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cluster_id Required. Unique identifier for the cluster on which to uninstall these libraries. #' @param libraries Required. The libraries to uninstall. @@ -96,3 +84,6 @@ librariesUninstall <- function(client, cluster_id, libraries) { client$do("POST", "/api/2.0/libraries/uninstall", body = body) } + + + diff --git a/R/metastores.R b/R/metastores.R index f1a0e8cd..a21f1225 100755 --- a/R/metastores.R +++ b/R/metastores.R @@ -9,9 +9,6 @@ NULL #' __workspace_id__ exists, it will be overwritten by the new __metastore_id__ #' and __default_catalog_name__. The caller must be an account admin. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param default_catalog_name Required. The name of the default catalog in the metastore. #' @param metastore_id Required. The unique ID of the metastore. @@ -24,7 +21,6 @@ metastoresAssign <- function(client, workspace_id, metastore_id, default_catalog client$do("PUT", paste("/api/2.1/unity-catalog/workspaces/", workspace_id, "/metastore", , sep = ""), body = body) } - #' Create a metastore. #' #' Creates a new metastore based on a provided name and optional storage root @@ -33,9 +29,6 @@ metastoresAssign <- function(client, workspace_id, metastore_id, default_catalog #' field is set to the empty string (**''**), the ownership is assigned to the #' System User instead. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The user-specified name of the metastore. #' @param region Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). @@ -47,7 +40,6 @@ metastoresCreate <- function(client, name, region = NULL, storage_root = NULL) { body <- list(name = name, region = region, storage_root = storage_root) client$do("POST", "/api/2.1/unity-catalog/metastores", body = body) } - #' Get metastore assignment for workspace. #' #' Gets the metastore assignment for the workspace being accessed. @@ -58,14 +50,10 @@ metastoresCreate <- function(client, name, region = NULL, storage_root = NULL) { metastoresCurrent <- function(client) { client$do("GET", "/api/2.1/unity-catalog/current-metastore-assignment") } - #' Delete a metastore. #' #' Deletes a metastore. The caller must be a metastore admin. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param force Force deletion even if the metastore is not empty. #' @param id Required. Unique ID of the metastore. 
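The libraries service above has no AndWait variant: as its roxygen notes, librariesInstall only queues the installation and returns immediately. A sketch of pairing it with librariesClusterStatus to observe progress; the cluster ID and PyPI package are hypothetical:

client <- DatabricksClient()
cluster_id <- "1234-567890-ab12cd34"  # hypothetical

# Returns before the installation completes; the library only becomes
# usable once the cluster finishes installing it.
librariesInstall(client, cluster_id = cluster_id,
  libraries = list(list(pypi = list(package = "simplejson"))))

# Per-library status rows, ordered as described in the roxygen above.
librariesClusterStatus(client, cluster_id = cluster_id)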
@@ -77,15 +65,11 @@ metastoresDelete <- function(client, id, force = NULL) { client$do("DELETE", paste("/api/2.1/unity-catalog/metastores/", id, sep = ""), query = query) } - #' Get a metastore. #' #' Gets a metastore that matches the supplied ID. The caller must be a metastore #' admin to retrieve this info. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID of the metastore. #' @@ -95,7 +79,6 @@ metastoresGet <- function(client, id) { client$do("GET", paste("/api/2.1/unity-catalog/metastores/", id, sep = "")) } - #' List metastores. #' #' Gets an array of the available metastores (as __MetastoreInfo__ objects). The @@ -113,7 +96,6 @@ metastoresList <- function(client) { return(json$metastores) } - #' Get a metastore summary. #' #' Gets information about a metastore. This summary includes the storage @@ -125,14 +107,10 @@ metastoresList <- function(client) { metastoresSummary <- function(client) { client$do("GET", "/api/2.1/unity-catalog/metastore_summary") } - #' Delete an assignment. #' #' Deletes a metastore assignment. The caller must be an account administrator. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param metastore_id Required. Query for the ID of the metastore to delete. #' @param workspace_id Required. A workspace ID. @@ -144,16 +122,12 @@ metastoresUnassign <- function(client, workspace_id, metastore_id) { client$do("DELETE", paste("/api/2.1/unity-catalog/workspaces/", workspace_id, "/metastore", , sep = ""), query = query) } - #' Update a metastore. #' #' Updates information for a specific metastore. The caller must be a metastore #' admin. If the __owner__ field is set to the empty string (**''**), the #' ownership is updated to the System User. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param delta_sharing_organization_name The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name. #' @param delta_sharing_recipient_token_lifetime_in_seconds The lifetime of delta sharing recipient token in seconds. @@ -176,7 +150,6 @@ metastoresUpdate <- function(client, id, delta_sharing_organization_name = NULL, client$do("PATCH", paste("/api/2.1/unity-catalog/metastores/", id, sep = ""), body = body) } - #' Update an assignment. #' #' Updates a metastore assignment. This operation can be used to update @@ -185,9 +158,6 @@ metastoresUpdate <- function(client, id, delta_sharing_organization_name = NULL, #' admin to update __metastore_id__; otherwise, the caller can be a Workspace #' admin. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param default_catalog_name The name of the default catalog for the metastore. #' @param metastore_id The unique ID of the metastore. @@ -202,3 +172,12 @@ metastoresUpdateAssignment <- function(client, workspace_id, default_catalog_nam "/metastore", , sep = ""), body = body) } + + + + + + + + + diff --git a/R/model_registry.R b/R/model_registry.R index 17165f40..9f5f9659 100755 --- a/R/model_registry.R +++ b/R/model_registry.R @@ -7,9 +7,6 @@ NULL #' #' Approves a model version stage transition request. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param archive_existing_versions Required. Specifies whether to archive all current model versions in the target stage. #' @param comment User-provided comment on the action. 
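The metastores.R changes above are doc-layout cleanups, but the assignment semantics they document are easy to miss: assigning a metastore to a workspace overwrites any existing mapping. A sketch with hypothetical IDs:

client <- DatabricksClient()

# Overwrites any existing __metastore_id__ and __default_catalog_name__
# for this workspace, per the metastoresAssign() roxygen above.
metastoresAssign(client, workspace_id = 1234567890123456,  # hypothetical
  metastore_id = "11111111-2222-3333-4444-555555555555",   # hypothetical
  default_catalog_name = "main")

# Read back the assignment seen from the current workspace.
metastoresCurrent(client)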
@@ -25,16 +22,12 @@ modelRegistryApproveTransitionRequest <- function(client, name, version, stage, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/transition-requests/approve", body = body) } - #' Post a comment. #' #' Posts a comment on a model version. A comment can be submitted either by a #' user or programmatically to display relevant information about the model. For #' example, test results or deployment errors. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment Required. User-provided comment on the action. #' @param name Required. Name of the model. @@ -46,7 +39,6 @@ modelRegistryCreateComment <- function(client, name, version, comment) { body <- list(comment = comment, name = name, version = version) client$do("POST", "/api/2.0/mlflow/comments/create", body = body) } - #' Create a model. #' #' Creates a new registered model with the name specified in the request body. @@ -54,9 +46,6 @@ modelRegistryCreateComment <- function(client, name, version, comment) { #' Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name #' exists. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description Optional description for registered model. #' @param name Required. Register models under this name. @@ -68,14 +57,10 @@ modelRegistryCreateModel <- function(client, name, description = NULL, tags = NU body <- list(description = description, name = name, tags = tags) client$do("POST", "/api/2.0/mlflow/registered-models/create", body = body) } - #' Create a model version. #' #' Creates a model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description Optional description for model version. #' @param name Required. Register model under this name. @@ -92,14 +77,10 @@ modelRegistryCreateModelVersion <- function(client, name, source, description = source = source, tags = tags) client$do("POST", "/api/2.0/mlflow/model-versions/create", body = body) } - #' Make a transition request. #' #' Creates a model version stage transition request. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided comment on the action. #' @param name Required. Name of the model. @@ -112,16 +93,12 @@ modelRegistryCreateTransitionRequest <- function(client, name, version, stage, c body <- list(comment = comment, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/transition-requests/create", body = body) } - #' Create a webhook. #' #' **NOTE**: This endpoint is in Public Preview. #' #' Creates a registry webhook. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description User-specified description for the webhook. #' @param events Required. Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A new model version was created for the associated model. @@ -138,14 +115,10 @@ modelRegistryCreateWebhook <- function(client, events, description = NULL, http_ job_spec = job_spec, model_name = model_name, status = status) client$do("POST", "/api/2.0/mlflow/registry-webhooks/create", body = body) } - #' Delete a comment. #' #' Deletes a comment on a model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. This field has no description yet. 
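The model registry helpers above wrap the MLflow registry endpoints directly. A sketch that registers a model and logs a first version; the model name and artifact source are hypothetical:

client <- DatabricksClient()

# Fails with RESOURCE_ALREADY_EXISTS if the name is taken, per the
# modelRegistryCreateModel() roxygen above.
modelRegistryCreateModel(client, name = "my-model",
  description = "example registered model")

# source points at the logged model artifacts (hypothetical path).
modelRegistryCreateModelVersion(client, name = "my-model",
  source = "dbfs:/databricks/mlflow-tracking/some-run/artifacts/model")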
#' @@ -155,14 +128,10 @@ modelRegistryDeleteComment <- function(client, id) { query <- list(id = id) client$do("DELETE", "/api/2.0/mlflow/comments/delete", query = query) } - #' Delete a model. #' #' Deletes a registered model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Registered model unique name identifier. #' @@ -172,14 +141,10 @@ modelRegistryDeleteModel <- function(client, name) { query <- list(name = name) client$do("DELETE", "/api/2.0/mlflow/registered-models/delete", query = query) } - #' Delete a model tag. #' #' Deletes the tag for a registered model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the tag. #' @param name Required. Name of the registered model that the tag was logged under. @@ -190,14 +155,10 @@ modelRegistryDeleteModelTag <- function(client, name, key) { query <- list(key = key, name = name) client$do("DELETE", "/api/2.0/mlflow/registered-models/delete-tag", query = query) } - #' Delete a model version. #' #' Deletes a model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the registered model. #' @param version Required. Model version number. @@ -208,14 +169,10 @@ modelRegistryDeleteModelVersion <- function(client, name, version) { query <- list(name = name, version = version) client$do("DELETE", "/api/2.0/mlflow/model-versions/delete", query = query) } - #' Delete a model version tag. #' #' Deletes a model version tag. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the tag. #' @param name Required. Name of the registered model that the tag was logged under. @@ -227,14 +184,10 @@ modelRegistryDeleteModelVersionTag <- function(client, name, version, key) { query <- list(key = key, name = name, version = version) client$do("DELETE", "/api/2.0/mlflow/model-versions/delete-tag", query = query) } - #' Delete a transition request. #' #' Cancels a model version stage transition request. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided comment on the action. #' @param creator Required. Username of the user who created this request. @@ -250,15 +203,12 @@ modelRegistryDeleteTransitionRequest <- function(client, name, version, stage, c version = version) client$do("DELETE", "/api/2.0/mlflow/transition-requests/delete", query = query) } - #' Delete a webhook. #' #' **NOTE:** This endpoint is in Public Preview. #' #' Deletes a registry webhook. #' @param client Required. Instance of DatabricksClient() - - #' #' @param id Webhook ID required to delete a registry webhook. #' @@ -268,14 +218,10 @@ modelRegistryDeleteWebhook <- function(client, id = NULL) { query <- list(id = id) client$do("DELETE", "/api/2.0/mlflow/registry-webhooks/delete", query = query) } - #' Get the latest version. #' #' Gets the latest version of a registered model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Registered model unique name identifier. #' @param stages List of stages. @@ -292,7 +238,6 @@ modelRegistryGetLatestVersions <- function(client, name, stages = NULL) { return(json$model_versions) } - #' Get model. #' #' Get the details of a model. 
This is a Databricks workspace version of the @@ -301,9 +246,6 @@ modelRegistryGetLatestVersions <- function(client, name, stages = NULL) { #' #' MLflow endpoint: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Registered model unique name identifier. #' @@ -313,14 +255,10 @@ modelRegistryGetModel <- function(client, name) { query <- list(name = name) client$do("GET", "/api/2.0/mlflow/databricks/registered-models/get", query = query) } - #' Get a model version. #' #' Get a model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the registered model. #' @param version Required. Model version number. @@ -331,14 +269,10 @@ modelRegistryGetModelVersion <- function(client, name, version) { query <- list(name = name, version = version) client$do("GET", "/api/2.0/mlflow/model-versions/get", query = query) } - #' Get a model version URI. #' #' Gets a URI to download the model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the registered model. #' @param version Required. Model version number. @@ -349,14 +283,10 @@ modelRegistryGetModelVersionDownloadUri <- function(client, name, version) { query <- list(name = name, version = version) client$do("GET", "/api/2.0/mlflow/model-versions/get-download-uri", query = query) } - #' Get registered model permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param registered_model_id Required. The registered model for which to get or manage permissions. #' @@ -367,15 +297,11 @@ modelRegistryGetPermissionLevels <- function(client, registered_model_id) { client$do("GET", paste("/api/2.0/permissions/registered-models/", registered_model_id, "/permissionLevels", , sep = "")) } - #' Get registered model permissions. #' #' Gets the permissions of a registered model. Registered models can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param registered_model_id Required. The registered model for which to get or manage permissions. #' @@ -386,14 +312,11 @@ modelRegistryGetPermissions <- function(client, registered_model_id) { client$do("GET", paste("/api/2.0/permissions/registered-models/", registered_model_id, sep = "")) } - #' List models. #' #' Lists all available registered models, up to the limit specified in #' __max_results__. #' @param client Required. Instance of DatabricksClient() - - #' #' @param max_results Maximum number of registered models desired. #' @param page_token Pagination token to go to the next page based on a previous query. @@ -421,14 +344,10 @@ modelRegistryListModels <- function(client, max_results = NULL, page_token = NUL return(results) } - #' List transition requests. #' #' Gets a list of all open stage transition requests for the model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the model. #' @param version Required. Version of the model. @@ -444,15 +363,12 @@ modelRegistryListTransitionRequests <- function(client, name, version) { return(json$requests) } - #' List registry webhooks. #' #' **NOTE:** This endpoint is in Public Preview. #' #' Lists all registry webhooks. #' @param client Required. 
Instance of DatabricksClient() - - #' #' @param events If `events` is specified, any webhook with one or more of the specified trigger events is included in the output. #' @param model_name If not specified, all webhooks associated with the specified events are listed, regardless of their associated model. @@ -481,14 +397,10 @@ modelRegistryListWebhooks <- function(client, events = NULL, model_name = NULL, return(results) } - #' Reject a transition request. #' #' Rejects a model version stage transition request. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided comment on the action. #' @param name Required. Name of the model. @@ -501,14 +413,10 @@ modelRegistryRejectTransitionRequest <- function(client, name, version, stage, c body <- list(comment = comment, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/transition-requests/reject", body = body) } - #' Rename a model. #' #' Renames a registered model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Registered model unique name identifier. #' @param new_name If provided, updates the name for this `registered_model`. @@ -519,13 +427,10 @@ modelRegistryRenameModel <- function(client, name, new_name = NULL) { body <- list(name = name, new_name = new_name) client$do("POST", "/api/2.0/mlflow/registered-models/rename", body = body) } - #' Searches model versions. #' #' Searches for specific model versions based on the supplied __filter__. #' @param client Required. Instance of DatabricksClient() - - #' #' @param filter String filter condition, like 'name='my-model-name''. #' @param max_results Maximum number of models desired. @@ -557,13 +462,10 @@ modelRegistrySearchModelVersions <- function(client, filter = NULL, max_results return(results) } - #' Search models. #' #' Search for registered models based on the specified __filter__. #' @param client Required. Instance of DatabricksClient() - - #' #' @param filter String filter condition, like 'name LIKE 'my-model-name''. #' @param max_results Maximum number of models desired. @@ -595,14 +497,10 @@ modelRegistrySearchModels <- function(client, filter = NULL, max_results = NULL, return(results) } - #' Set a tag. #' #' Sets a tag on a registered model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the tag. #' @param name Required. Unique name of the model. @@ -614,14 +512,10 @@ modelRegistrySetModelTag <- function(client, name, key, value) { body <- list(key = key, name = name, value = value) client$do("POST", "/api/2.0/mlflow/registered-models/set-tag", body = body) } - #' Set a version tag. #' #' Sets a model version tag. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the tag. #' @param name Required. Unique name of the model. @@ -634,15 +528,11 @@ modelRegistrySetModelVersionTag <- function(client, name, version, key, value) { body <- list(key = key, name = name, value = value, version = version) client$do("POST", "/api/2.0/mlflow/model-versions/set-tag", body = body) } - #' Set registered model permissions. #' #' Sets permissions on a registered model. Registered models can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param registered_model_id Required. The registered model for which to get or manage permissions. 
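modelRegistryListModels, like the other paginated list helpers in this patch, follows the page token internally and returns all pages at once rather than a single page. A sketch, assuming the accumulated pages arrive as one data.frame, as the accumulation loop above suggests:

client <- DatabricksClient()

# One call; the helper keeps requesting pages until the listing is
# exhausted, so no page_token bookkeeping is needed by the caller.
models <- modelRegistryListModels(client, max_results = 100)
nrow(models)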
@@ -654,16 +544,12 @@ modelRegistrySetPermissions <- function(client, registered_model_id, access_cont client$do("PUT", paste("/api/2.0/permissions/registered-models/", registered_model_id, sep = ""), body = body) } - #' Test a webhook. #' #' **NOTE:** This endpoint is in Public Preview. #' #' Tests a registry webhook. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param event If `event` is specified, the test trigger uses the specified event. #' @param id Required. Webhook ID. @@ -674,7 +560,6 @@ modelRegistryTestRegistryWebhook <- function(client, id, event = NULL) { body <- list(event = event, id = id) client$do("POST", "/api/2.0/mlflow/registry-webhooks/test", body = body) } - #' Transition a stage. #' #' Transition a model version's stage. This is a Databricks workspace version of @@ -683,9 +568,6 @@ modelRegistryTestRegistryWebhook <- function(client, id, event = NULL) { #' #' MLflow endpoint: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param archive_existing_versions Required. Specifies whether to archive all current model versions in the target stage. #' @param comment User-provided comment on the action. @@ -702,14 +584,10 @@ modelRegistryTransitionStage <- function(client, name, version, stage, archive_e client$do("POST", "/api/2.0/mlflow/databricks/model-versions/transition-stage", body = body) } - #' Update a comment. #' #' Post an edit to a comment on a model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment Required. User-provided comment on the action. #' @param id Required. Unique identifier of an activity. @@ -720,14 +598,10 @@ modelRegistryUpdateComment <- function(client, id, comment) { body <- list(comment = comment, id = id) client$do("PATCH", "/api/2.0/mlflow/comments/update", body = body) } - #' Update model. #' #' Updates a registered model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description If provided, updates the description for this `registered_model`. #' @param name Required. Registered model unique name identifier. @@ -738,14 +612,10 @@ modelRegistryUpdateModel <- function(client, name, description = NULL) { body <- list(description = description, name = name) client$do("PATCH", "/api/2.0/mlflow/registered-models/update", body = body) } - #' Update model version. #' #' Updates the model version. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description If provided, updates the description for this `registered_model`. #' @param name Required. Name of the registered model. @@ -757,15 +627,11 @@ modelRegistryUpdateModelVersion <- function(client, name, version, description = body <- list(description = description, name = name, version = version) client$do("PATCH", "/api/2.0/mlflow/model-versions/update", body = body) } - #' Update registered model permissions. #' #' Updates the permissions on a registered model. Registered models can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param registered_model_id Required. The registered model for which to get or manage permissions. 
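modelRegistryTransitionStage above is the Databricks workspace version of the MLflow stage-transition endpoint. A sketch that promotes a hypothetical model version, assuming the standard registry stage names:

client <- DatabricksClient()

# Promote version 3 to Production and archive whatever currently holds
# that stage; the model name and version are hypothetical.
modelRegistryTransitionStage(client, name = "my-model", version = "3",
  stage = "Production", archive_existing_versions = TRUE)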
@@ -777,16 +643,12 @@ modelRegistryUpdatePermissions <- function(client, registered_model_id, access_c client$do("PATCH", paste("/api/2.0/permissions/registered-models/", registered_model_id, sep = ""), body = body) } - #' Update a webhook. #' #' **NOTE:** This endpoint is in Public Preview. #' #' Updates a registry webhook. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description User-specified description for the webhook. #' @param events Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A new model version was created for the associated model. @@ -804,3 +666,38 @@ modelRegistryUpdateWebhook <- function(client, id, description = NULL, events = client$do("PATCH", "/api/2.0/mlflow/registry-webhooks/update", body = body) } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/R/model_versions.R b/R/model_versions.R index 95d55b2c..7fb5e875 100755 --- a/R/model_versions.R +++ b/R/model_versions.R @@ -13,9 +13,6 @@ NULL #' **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** #' privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. The three-level (fully qualified) name of the model version. #' @param version Required. The integer version number of the model version. @@ -27,7 +24,6 @@ modelVersionsDelete <- function(client, full_name, version) { client$do("DELETE", paste("/api/2.1/unity-catalog/models/", full_name, "/versions/", version, sep = "")) } - #' Get a Model Version. #' #' Get a model version. @@ -37,9 +33,6 @@ modelVersionsDelete <- function(client, full_name, version) { #' must also be the owner or have the **USE_CATALOG** privilege on the parent #' catalog and the **USE_SCHEMA** privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. The three-level (fully qualified) name of the model version. #' @param include_browse Whether to include model versions in the response for which the principal can only access selective metadata for. @@ -52,7 +45,6 @@ modelVersionsGet <- function(client, full_name, version, include_browse = NULL) client$do("GET", paste("/api/2.1/unity-catalog/models/", full_name, "/versions/", version, sep = ""), query = query) } - #' Get Model Version By Alias. #' #' Get a model version by alias. @@ -62,9 +54,6 @@ modelVersionsGet <- function(client, full_name, version, include_browse = NULL) #' be the owner or have the **USE_CATALOG** privilege on the parent catalog and #' the **USE_SCHEMA** privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param alias Required. The name of the alias. #' @param full_name Required. The three-level (fully qualified) name of the registered model. @@ -76,7 +65,6 @@ modelVersionsGetByAlias <- function(client, full_name, alias) { client$do("GET", paste("/api/2.1/unity-catalog/models/", full_name, "/aliases/", alias, sep = "")) } - #' List Model Versions. #' #' List model versions. You can list model versions under a particular schema, @@ -92,9 +80,6 @@ modelVersionsGetByAlias <- function(client, full_name, alias) { #' #' There is no guarantee of a specific ordering of the elements in the response. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. The full three-level name of the registered model under which to list model versions. 
#' @param include_browse Whether to include model versions in the response for which the principal can only access selective metadata for. @@ -126,7 +111,6 @@ modelVersionsList <- function(client, full_name, include_browse = NULL, max_resu return(results) } - #' Update a Model Version. #' #' Updates the specified model version. @@ -138,9 +122,6 @@ modelVersionsList <- function(client, full_name, include_browse = NULL, max_resu #' #' Currently only the comment of the model version can be updated. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment The comment attached to the model version. #' @param full_name Required. The three-level (fully qualified) name of the model version. @@ -154,3 +135,7 @@ modelVersionsUpdate <- function(client, full_name, version, comment = NULL) { version, sep = ""), body = body) } + + + + diff --git a/R/online_tables.R b/R/online_tables.R index c185de84..b0eb8847 100755 --- a/R/online_tables.R +++ b/R/online_tables.R @@ -7,8 +7,6 @@ NULL #' #' Create a new Online Table. #' @param client Required. Instance of DatabricksClient() - - #' #' @param name Full three-part (catalog, schema, table) name of the table. #' @param spec Specification of the online table. @@ -19,16 +17,12 @@ onlineTablesCreate <- function(client, name = NULL, spec = NULL) { body <- list(name = name, spec = spec) client$do("POST", "/api/2.0/online-tables", body = body) } - #' Delete an Online Table. #' #' Delete an online table. Warning: This will delete all the data in the online #' table. If the source Delta table was deleted or modified since this Online #' Table was created, this will lose the data forever! #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Full three-part (catalog, schema, table) name of the table. #' @@ -38,14 +32,10 @@ onlineTablesDelete <- function(client, name) { client$do("DELETE", paste("/api/2.0/online-tables/", name, sep = "")) } - #' Get an Online Table. #' #' Get information about an existing online table and its status. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Full three-part (catalog, schema, table) name of the table. #' @@ -56,3 +46,5 @@ onlineTablesGet <- function(client, name) { client$do("GET", paste("/api/2.0/online-tables/", name, sep = "")) } + + diff --git a/R/permission_migration.R b/R/permission_migration.R index 0c865b2d..423c4d6a 100755 --- a/R/permission_migration.R +++ b/R/permission_migration.R @@ -8,9 +8,6 @@ NULL #' Migrate a batch of permissions from a workspace local group to an account #' group. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param from_workspace_group_name Required. The name of the workspace group that permissions will be migrated from. #' @param size The maximum number of permissions that will be migrated. diff --git a/R/permissions.R b/R/permissions.R index 820d0a2a..8d1292dd 100755 --- a/R/permissions.R +++ b/R/permissions.R @@ -8,9 +8,6 @@ NULL #' Gets the permissions of an object. Objects can inherit permissions from their #' parent objects or root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param request_object_id Required. The id of the request object. #' @param request_object_type Required. The type of the request object. 
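The generic permissions helpers above splice the object type and ID straight into the REST path, so the type is the plain path segment used elsewhere in this patch, such as "jobs" or "pipelines". A sketch with a hypothetical job ID:

client <- DatabricksClient()

# GET /api/2.0/permissions/jobs/123: read a job's effective ACL.
perms <- permissionsGet(client, request_object_type = "jobs",
  request_object_id = "123")  # hypothetical ID
perms$access_control_list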
@@ -22,14 +19,10 @@ permissionsGet <- function(client, request_object_type, request_object_id) { client$do("GET", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, sep = "")) } - #' Get object permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param request_object_id Required. . #' @param request_object_type Required. . @@ -41,15 +34,11 @@ permissionsGetPermissionLevels <- function(client, request_object_type, request_ client$do("GET", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, "/permissionLevels", , sep = "")) } - #' Set object permissions. #' #' Sets permissions on an object. Objects can inherit permissions from their #' parent objects or root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param request_object_id Required. The id of the request object. @@ -62,15 +51,11 @@ permissionsSet <- function(client, request_object_type, request_object_id, acces client$do("PUT", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, sep = ""), body = body) } - #' Update object permissions. #' #' Updates the permissions on an object. Objects can inherit permissions from #' their parent objects or root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param request_object_id Required. The id of the request object. @@ -84,3 +69,6 @@ permissionsUpdate <- function(client, request_object_type, request_object_id, ac sep = ""), body = body) } + + + diff --git a/R/pipelines.R b/R/pipelines.R index 890224dd..1c191795 100755 --- a/R/pipelines.R +++ b/R/pipelines.R @@ -8,8 +8,6 @@ NULL #' Creates a new data processing pipeline based on the requested configuration. #' If successful, this method returns the ID of the new pipeline. #' @param client Required. Instance of DatabricksClient() - - #' #' @param allow_duplicate_names If false, deployment will fail if name conflicts with that of another pipeline. #' @param catalog A catalog in Unity Catalog to publish data from this pipeline to. @@ -46,14 +44,10 @@ pipelinesCreate <- function(client, allow_duplicate_names = NULL, catalog = NULL trigger = trigger) client$do("POST", "/api/2.0/pipelines", body = body) } - #' Delete a pipeline. #' #' Deletes a pipeline. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param pipeline_id Required. This field has no description yet. #' @@ -63,12 +57,8 @@ pipelinesDelete <- function(client, pipeline_id) { client$do("DELETE", paste("/api/2.0/pipelines/", pipeline_id, sep = "")) } - #' Get a pipeline. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param pipeline_id Required. This field has no description yet. #' @@ -78,14 +68,10 @@ pipelinesGet <- function(client, pipeline_id) { client$do("GET", paste("/api/2.0/pipelines/", pipeline_id, sep = "")) } - #' Get pipeline permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param pipeline_id Required. The pipeline for which to get or manage permissions. 
#' @@ -96,15 +82,11 @@ pipelinesGetPermissionLevels <- function(client, pipeline_id) { client$do("GET", paste("/api/2.0/permissions/pipelines/", pipeline_id, "/permissionLevels", , sep = "")) } - #' Get pipeline permissions. #' #' Gets the permissions of a pipeline. Pipelines can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param pipeline_id Required. The pipeline for which to get or manage permissions. #' @@ -114,14 +96,10 @@ pipelinesGetPermissions <- function(client, pipeline_id) { client$do("GET", paste("/api/2.0/permissions/pipelines/", pipeline_id, sep = "")) } - #' Get a pipeline update. #' #' Gets an update from an active pipeline. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param pipeline_id Required. The ID of the pipeline. #' @param update_id Required. The ID of the update. @@ -133,14 +111,10 @@ pipelinesGetUpdate <- function(client, pipeline_id, update_id) { client$do("GET", paste("/api/2.0/pipelines/", pipeline_id, "/updates/", update_id, sep = "")) } - #' List pipeline events. #' #' Retrieves events for a pipeline. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param filter Criteria to select a subset of results, expressed using a SQL-like syntax. #' @param max_results Max number of entries to return in a single page. @@ -174,13 +148,10 @@ pipelinesListPipelineEvents <- function(client, pipeline_id, filter = NULL, max_ return(results) } - #' List pipelines. #' #' Lists pipelines defined in the Delta Live Tables system. #' @param client Required. Instance of DatabricksClient() - - #' #' @param filter Select a subset of results based on the specified criteria. #' @param max_results The maximum number of entries to return in a single page. @@ -212,14 +183,10 @@ pipelinesListPipelines <- function(client, filter = NULL, max_results = NULL, or return(results) } - #' List pipeline updates. #' #' List updates for an active pipeline. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param max_results Max number of entries to return in a single page. #' @param page_token Page token returned by previous call. @@ -234,15 +201,11 @@ pipelinesListUpdates <- function(client, pipeline_id, max_results = NULL, page_t client$do("GET", paste("/api/2.0/pipelines/", pipeline_id, "/updates", , sep = ""), query = query) } - #' Set pipeline permissions. #' #' Sets permissions on a pipeline. Pipelines can inherit permissions from their #' root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param pipeline_id Required. The pipeline for which to get or manage permissions. @@ -254,16 +217,12 @@ pipelinesSetPermissions <- function(client, pipeline_id, access_control_list = N client$do("PUT", paste("/api/2.0/permissions/pipelines/", pipeline_id, sep = ""), body = body) } - #' Start a pipeline. #' #' Starts a new update for the pipeline. If there is already an active update #' for the pipeline, the request will fail and the active update will remain #' running. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cause This field has no description yet. #' @param full_refresh If true, this update will reset all tables before running. 
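pipelinesStartUpdate above fails outright, rather than queueing, when another update is already active. A sketch that starts a full refresh and then inspects it, assuming the response carries the new update ID as in the REST API; the pipeline ID is hypothetical:

client <- DatabricksClient()
pipeline_id <- "a12cd3e4-0ab1-4abc-8a2b-1a2bcd3e4f56"  # hypothetical

# Resets all tables before running, per the full_refresh doc above.
started <- pipelinesStartUpdate(client, pipeline_id = pipeline_id,
  full_refresh = TRUE)

pipelinesGetUpdate(client, pipeline_id = pipeline_id,
  update_id = started$update_id)  # assumes update_id in the response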
@@ -281,75 +240,24 @@ pipelinesStartUpdate <- function(client, pipeline_id, cause = NULL, full_refresh client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/updates", , sep = ""), body = body) } - #' Stop a pipeline. #' #' Stops the pipeline by canceling the active update. If there is no active #' update for the pipeline, this request is a no-op. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Pipelines on Databricks reach -#' IDLE state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Pipelines is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param pipeline_id Required. This field has no description yet. #' #' @rdname pipelinesStop #' @export -pipelinesStop <- function(client, pipeline_id, timeout = 20, callback = cli_reporter) { +pipelinesStop <- function(client, pipeline_id) { - op_response <- client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/stop", - , sep = "")) - started <- as.numeric(Sys.time()) - target_states <- c("IDLE", c()) - failure_states <- c("FAILED", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- pipelinesGet(client, pipeline_id = pipeline_id) - status <- poll$state - status_message <- poll$cause - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach IDLE, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::pipelinesGet(pipeline_id=", pipeline_id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/stop", , sep = "")) } - #' Edit a pipeline. #' #' Updates a pipeline with the supplied configuration. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param allow_duplicate_names If false, deployment will fail if name has changed and conflicts the name of another pipeline. #' @param catalog A catalog in Unity Catalog to publish data from this pipeline to. @@ -387,15 +295,11 @@ pipelinesUpdate <- function(client, pipeline_id, allow_duplicate_names = NULL, c target = target, trigger = trigger) client$do("PUT", paste("/api/2.0/pipelines/", pipeline_id, sep = ""), body = body) } - #' Update pipeline permissions. #' #' Updates the permissions on a pipeline. Pipelines can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param pipeline_id Required. The pipeline for which to get or manage permissions. 
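This hunk is the breaking change in miniature: pipelinesStop now only issues the POST and returns immediately, and the polling loop moves to the new pipelinesStopAndWait added further down in this file. A migration sketch, with a placeholder pipeline ID:

library(databricks)

client <- DatabricksClient()
id <- "hypothetical-pipeline-id"

# Before this change, pipelinesStop() blocked until the pipeline reached IDLE.
# It now returns as soon as the stop request is accepted:
pipelinesStop(client, id)

# Callers that relied on the old blocking behavior must opt in explicitly:
pipelinesStopAndWait(client, id, timeout = 20)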
@@ -408,3 +312,74 @@ pipelinesUpdatePermissions <- function(client, pipeline_id, access_control_list body = body) } + + + + + + + + + + +#' Stop a pipeline. +#' +#' Stops the pipeline by canceling the active update. If there is no active +#' update for the pipeline, this request is a no-op. +#' @param client Required. Instance of DatabricksClient() + +#' +#' @description +#' This is a long-running operation, which blocks until Pipelines on Databricks reach +#' IDLE state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Pipelines is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param pipeline_id Required. This field has no description yet. +#' +#' @rdname pipelinesStopAndWait +#' @export +pipelinesStopAndWait <- function(client, pipeline_id, timeout = 20, callback = cli_reporter) { + + op_response <- client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/stop", + , sep = "")) + started <- as.numeric(Sys.time()) + target_states <- c("IDLE", c()) + failure_states <- c("FAILED", c()) + status_message <- "polling..." + attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- pipelinesGet(client, pipeline_id = pipeline_id) + status <- poll$state + status_message <- poll$cause + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach IDLE, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::pipelinesGet(pipeline_id=", pipeline_id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + + + diff --git a/R/policy_families.R b/R/policy_families.R index 8d3983c3..b2642070 100755 --- a/R/policy_families.R +++ b/R/policy_families.R @@ -7,9 +7,6 @@ NULL #' #' Retrieve the information for an policy family based on its identifier. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param policy_family_id Required. This field has no description yet. #' @@ -19,13 +16,10 @@ policyFamiliesGet <- function(client, policy_family_id) { client$do("GET", paste("/api/2.0/policy-families/", policy_family_id, sep = "")) } - #' List policy families. #' #' Retrieve a list of policy families. This API is paginated. #' @param client Required. Instance of DatabricksClient() - - #' #' @param max_results The max number of policy families to return. #' @param page_token A token that can be used to get the next page of results. @@ -54,3 +48,4 @@ policyFamiliesList <- function(client, max_results = NULL, page_token = NULL) { } + diff --git a/R/providers.R b/R/providers.R index 6da2e8f6..093d8505 100755 --- a/R/providers.R +++ b/R/providers.R @@ -8,9 +8,6 @@ NULL #' Creates a new authentication provider minimally based on a name and #' authentication type. 
The caller must be an admin on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param authentication_type Required. The delta sharing authentication type. #' @param comment Description about the provider. @@ -24,15 +21,11 @@ providersCreate <- function(client, name, authentication_type, comment = NULL, r recipient_profile_str = recipient_profile_str) client$do("POST", "/api/2.1/unity-catalog/providers", body = body) } - #' Delete a provider. #' #' Deletes an authentication provider, if the caller is a metastore admin or is #' the owner of the provider. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the provider. #' @@ -42,16 +35,12 @@ providersDelete <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/providers/", name, sep = "")) } - #' Get a provider. #' #' Gets a specific authentication provider. The caller must supply the name of #' the provider, and must either be a metastore admin or the owner of the #' provider. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the provider. #' @@ -61,7 +50,6 @@ providersGet <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/providers/", name, sep = "")) } - #' List providers. #' #' Gets an array of available authentication providers. The caller must either @@ -69,8 +57,6 @@ providersGet <- function(client, name) { #' the caller are not included in the response. There is no guarantee of a #' specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - #' #' @param data_provider_global_metastore_id If not provided, all providers will be returned. #' @@ -85,16 +71,12 @@ providersList <- function(client, data_provider_global_metastore_id = NULL) { return(json$providers) } - #' List shares by Provider. #' #' Gets an array of a specified provider's shares within the metastore where: #' #' * the caller is a metastore admin, or * the caller is the owner. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the provider in which to list shares. #' @@ -110,7 +92,6 @@ providersListShares <- function(client, name) { return(json$shares) } - #' Update a provider. #' #' Updates the information for an authentication provider, if the caller is a @@ -118,9 +99,6 @@ providersListShares <- function(client, name) { #' provider name, the caller must be both a metastore admin and the owner of the #' provider. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment Description about the provider. #' @param name Required. Name of the provider. @@ -137,3 +115,8 @@ providersUpdate <- function(client, name, comment = NULL, new_name = NULL, owner body = body) } + + + + + diff --git a/R/queries.R b/R/queries.R index a028589b..07041f53 100755 --- a/R/queries.R +++ b/R/queries.R @@ -15,8 +15,6 @@ NULL #' #' **Note**: You cannot add a visualization until you create the query. #' @param client Required. Instance of DatabricksClient() - - #' #' @param data_source_id Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID. #' @param description General description that conveys additional information about this query such as usage notes. 
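A short sketch of the provider lifecycle covered above. The provider name is a placeholder, and "TOKEN" is used only for illustration; consult the Delta Sharing documentation for the authentication types your metastore accepts:

library(databricks)

client <- DatabricksClient()

providersCreate(client,
  name = "example-provider",
  authentication_type = "TOKEN",
  comment = "created from the R SDK")

# Shares this provider exposes to the current metastore.
shares <- providersListShares(client, name = "example-provider")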
@@ -34,16 +32,12 @@ queriesCreate <- function(client, data_source_id = NULL, description = NULL, nam options = options, parent = parent, query = query, run_as_role = run_as_role) client$do("POST", "/api/2.0/preview/sql/queries", body = body) } - #' Delete a query. #' #' Moves a query to the trash. Trashed queries immediately disappear from #' searches and list views, and they cannot be used for alerts. The trash is #' deleted after 30 days. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param query_id Required. This field has no description yet. #' @@ -53,15 +47,11 @@ queriesDelete <- function(client, query_id) { client$do("DELETE", paste("/api/2.0/preview/sql/queries/", query_id, sep = "")) } - #' Get a query definition. #' #' Retrieve a query object definition along with contextual permissions #' information about the currently authenticated user. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param query_id Required. This field has no description yet. #' @@ -71,7 +61,6 @@ queriesGet <- function(client, query_id) { client$do("GET", paste("/api/2.0/preview/sql/queries/", query_id, sep = "")) } - #' Get a list of queries. #' #' Gets a list of queries. Optionally, this list can be filtered by a search @@ -80,8 +69,6 @@ queriesGet <- function(client, query_id) { #' ### **Warning: Calling this API concurrently 10 or more times could result in #' throttling, service degradation, or a temporary ban.** #' @param client Required. Instance of DatabricksClient() - - #' #' @param order Name of query attribute to order by. #' @param page Page number to retrieve. @@ -111,15 +98,11 @@ queriesList <- function(client, order = NULL, page = NULL, page_size = NULL, q = return(results) } - #' Restore a query. #' #' Restore a query that has been moved to the trash. A restored query appears in #' list views and searches. You can use restored queries for alerts. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param query_id Required. This field has no description yet. #' @@ -129,16 +112,12 @@ queriesRestore <- function(client, query_id) { client$do("POST", paste("/api/2.0/preview/sql/queries/trash/", query_id, sep = "")) } - #' Change a query definition. #' #' Modify this query definition. #' #' **Note**: You cannot undo this operation. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param data_source_id Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID. #' @param description General description that conveys additional information about this query such as usage notes. @@ -158,3 +137,8 @@ queriesUpdate <- function(client, query_id, data_source_id = NULL, description = body = body) } + + + + + diff --git a/R/query_history.R b/R/query_history.R index eb3445a7..6fc1a687 100755 --- a/R/query_history.R +++ b/R/query_history.R @@ -9,8 +9,6 @@ NULL #' #' You can filter by user ID, warehouse ID, status, and time range. #' @param client Required. Instance of DatabricksClient() - - #' #' @param filter_by A filter to limit query history results. #' @param include_metrics Whether to include metrics about query. diff --git a/R/query_visualizations.R b/R/query_visualizations.R index 241b9cc8..cff13c70 100755 --- a/R/query_visualizations.R +++ b/R/query_visualizations.R @@ -5,9 +5,6 @@ NULL #' Add visualization to a query. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param description A short description of this visualization. 
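For the query endpoints above, a sketch of a create/list/delete round trip, assuming the create response carries an id field as the REST API documents; the data source ID is a placeholder:

library(databricks)

client <- DatabricksClient()

q <- queriesCreate(client,
  name = "Example query",
  query = "SELECT 1",
  data_source_id = "hypothetical-data-source-id")

# Paginates internally and returns one data.frame; per the warning above,
# avoid issuing this call concurrently.
all_queries <- queriesList(client, page_size = 50)

# Moves the query to the trash (purged after 30 days).
queriesDelete(client, query_id = q$id)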
#' @param name The name of the visualization that appears on dashboards and the query screen. @@ -23,12 +20,8 @@ queryVisualizationsCreate <- function(client, query_id, type, options, descripti type = type) client$do("POST", "/api/2.0/preview/sql/visualizations", body = body) } - #' Remove visualization. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Widget ID returned by :method:queryvizualisations/create. #' @@ -38,12 +31,8 @@ queryVisualizationsDelete <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/sql/visualizations/", id, sep = "")) } - #' Edit existing visualization. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param created_at This field has no description yet. #' @param description A short description of this visualization. @@ -63,3 +52,5 @@ queryVisualizationsUpdate <- function(client, id, created_at = NULL, description body = body) } + + diff --git a/R/recipient_activation.R b/R/recipient_activation.R index 82856107..9613b9da 100755 --- a/R/recipient_activation.R +++ b/R/recipient_activation.R @@ -7,9 +7,6 @@ NULL #' #' Gets an activation URL for a share. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param activation_url Required. The one time activation url. #' @@ -20,15 +17,11 @@ recipientActivationGetActivationUrlInfo <- function(client, activation_url) { client$do("GET", paste("/api/2.1/unity-catalog/public/data_sharing_activation_info/", activation_url, sep = "")) } - #' Get an access token. #' #' Retrieve access token with an activation url. This is a public API without #' any authentication. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param activation_url Required. The one time activation url. #' @@ -40,3 +33,4 @@ recipientActivationRetrieveToken <- function(client, activation_url) { activation_url, sep = "")) } + diff --git a/R/recipients.R b/R/recipients.R index 3aac12af..f839c7f9 100755 --- a/R/recipients.R +++ b/R/recipients.R @@ -9,9 +9,6 @@ NULL #' metastore. The caller must be a metastore admin or has the #' **CREATE_RECIPIENT** privilege on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param authentication_type Required. The delta sharing authentication type. #' @param comment Description about the recipient. @@ -31,15 +28,11 @@ recipientsCreate <- function(client, name, authentication_type, comment = NULL, sharing_code = sharing_code) client$do("POST", "/api/2.1/unity-catalog/recipients", body = body) } - #' Delete a share recipient. #' #' Deletes the specified recipient from the metastore. The caller must be the #' owner of the recipient. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the recipient. #' @@ -49,16 +42,12 @@ recipientsDelete <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/recipients/", name, sep = "")) } - #' Get a share recipient. #' #' Gets a share recipient from the metastore if: #' #' * the caller is the owner of the share recipient, or: * is a metastore admin #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the recipient. #' @@ -68,7 +57,6 @@ recipientsGet <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/recipients/", name, sep = "")) } - #' List share recipients. 
#' #' Gets an array of all share recipients within the current metastore where: @@ -76,8 +64,6 @@ recipientsGet <- function(client, name) { #' * the caller is a metastore admin, or * the caller is the owner. There is no #' guarantee of a specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - #' #' @param data_recipient_global_metastore_id If not provided, all recipients will be returned. #' @@ -92,15 +78,11 @@ recipientsList <- function(client, data_recipient_global_metastore_id = NULL) { return(json$recipients) } - #' Rotate a token. #' #' Refreshes the specified recipient's delta sharing authentication token with #' the provided token info. The caller must be the owner of the recipient. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param existing_token_expire_in_seconds Required. The expiration time of the bearer token in ISO 8601 format. #' @param name Required. The name of the recipient. @@ -112,15 +94,11 @@ recipientsRotateToken <- function(client, name, existing_token_expire_in_seconds client$do("POST", paste("/api/2.1/unity-catalog/recipients/", name, "/rotate-token", , sep = ""), body = body) } - #' Get recipient share permissions. #' #' Gets the share permissions for the specified Recipient. The caller must be a #' metastore admin or the owner of the Recipient. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the Recipient. #' @@ -131,7 +109,6 @@ recipientsSharePermissions <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/recipients/", name, "/share-permissions", , sep = "")) } - #' Update a share recipient. #' #' Updates an existing recipient in the metastore. The caller must be a @@ -139,9 +116,6 @@ recipientsSharePermissions <- function(client, name) { #' updated, the user must be both a metastore admin and the owner of the #' recipient. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment Description about the recipient. #' @param ip_access_list IP Access List. @@ -160,3 +134,9 @@ recipientsUpdate <- function(client, name, comment = NULL, ip_access_list = NULL body = body) } + + + + + + diff --git a/R/registered_models.R b/R/registered_models.R index f1d4e74f..4d9d6f59 100755 --- a/R/registered_models.R +++ b/R/registered_models.R @@ -18,9 +18,6 @@ NULL #' caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the #' parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. The name of the catalog where the schema and the registered model reside. #' @param comment The comment attached to the registered model. @@ -36,7 +33,6 @@ registeredModelsCreate <- function(client, catalog_name, schema_name, name, comm storage_location = storage_location) client$do("POST", "/api/2.1/unity-catalog/models", body = body) } - #' Delete a Registered Model. #' #' Deletes a registered model and all its model versions from the specified @@ -47,9 +43,6 @@ registeredModelsCreate <- function(client, catalog_name, schema_name, name, comm #' **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** #' privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. The three-level (fully qualified) name of the registered model. 
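A sketch of the token rotation shown above. The recipient name is a placeholder; going by the parameter name, the expiry is passed as a number of seconds, even though the generated description mentions ISO 8601:

library(databricks)

client <- DatabricksClient()

# Let the recipient's current bearer token live one more hour while a
# replacement token is issued.
recipientsRotateToken(client,
  name = "example-recipient",
  existing_token_expire_in_seconds = 3600)

recipientsSharePermissions(client, name = "example-recipient")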
#' @@ -59,7 +52,6 @@ registeredModelsDelete <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/models/", full_name, sep = "")) } - #' Delete a Registered Model Alias. #' #' Deletes a registered model alias. @@ -69,9 +61,6 @@ registeredModelsDelete <- function(client, full_name) { #' **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** #' privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param alias Required. The name of the alias. #' @param full_name Required. The three-level (fully qualified) name of the registered model. @@ -83,7 +72,6 @@ registeredModelsDeleteAlias <- function(client, full_name, alias) { client$do("DELETE", paste("/api/2.1/unity-catalog/models/", full_name, "/aliases/", alias, sep = "")) } - #' Get a Registered Model. #' #' Get a registered model. @@ -93,9 +81,6 @@ registeredModelsDeleteAlias <- function(client, full_name, alias) { #' be the owner or have the **USE_CATALOG** privilege on the parent catalog and #' the **USE_SCHEMA** privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. The three-level (fully qualified) name of the registered model. #' @param include_browse Whether to include registered models in the response for which the principal can only access selective metadata for. @@ -107,7 +92,6 @@ registeredModelsGet <- function(client, full_name, include_browse = NULL) { client$do("GET", paste("/api/2.1/unity-catalog/models/", full_name, sep = ""), query = query) } - #' List Registered Models. #' #' List registered models. You can list registered models under a particular @@ -123,8 +107,6 @@ registeredModelsGet <- function(client, full_name, include_browse = NULL) { #' #' There is no guarantee of a specific ordering of the elements in the response. #' @param client Required. Instance of DatabricksClient() - - #' #' @param catalog_name The identifier of the catalog under which to list registered models. #' @param include_browse Whether to include registered models in the response for which the principal can only access selective metadata for. @@ -157,7 +139,6 @@ registeredModelsList <- function(client, catalog_name = NULL, include_browse = N return(results) } - #' Set a Registered Model Alias. #' #' Set an alias on the specified registered model. @@ -167,9 +148,6 @@ registeredModelsList <- function(client, catalog_name = NULL, include_browse = N #' **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** #' privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param alias Required. The name of the alias. #' @param full_name Required. Full name of the registered model. @@ -182,7 +160,6 @@ registeredModelsSetAlias <- function(client, full_name, alias, version_num) { client$do("PUT", paste("/api/2.1/unity-catalog/models/", full_name, "/aliases/", alias, sep = ""), body = body) } - #' Update a Registered Model. #' #' Updates the specified registered model. @@ -195,9 +172,6 @@ registeredModelsSetAlias <- function(client, full_name, alias, version_num) { #' Currently only the name, the owner or the comment of the registered model can #' be updated. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment The comment attached to the registered model. #' @param full_name Required. The three-level (fully qualified) name of the registered model. 
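To make the alias endpoints above concrete, a sketch using a placeholder three-level model name:

library(databricks)

client <- DatabricksClient()
model <- "main.default.example_model"

# Point the "champion" alias at version 2 of the registered model.
registeredModelsSetAlias(client, full_name = model, alias = "champion",
  version_num = 2)

# Aliases come back as part of the model details.
registeredModelsGet(client, full_name = model)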
@@ -213,3 +187,9 @@ registeredModelsUpdate <- function(client, full_name, comment = NULL, new_name = body = body) } + + + + + + diff --git a/R/repos.R b/R/repos.R index 419f7c09..5f011ef9 100755 --- a/R/repos.R +++ b/R/repos.R @@ -9,9 +9,6 @@ NULL #' specified. Note that repos created programmatically must be linked to a #' remote Git repo, unlike repos created in the browser. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Desired path for the repo in the workspace. #' @param provider Required. Git provider. @@ -25,14 +22,10 @@ reposCreate <- function(client, url, provider, path = NULL, sparse_checkout = NU url = url) client$do("POST", "/api/2.0/repos", body = body) } - #' Delete a repo. #' #' Deletes the specified repo. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param repo_id Required. The ID for the corresponding repo to access. #' @@ -42,14 +35,10 @@ reposDelete <- function(client, repo_id) { client$do("DELETE", paste("/api/2.0/repos/", repo_id, sep = "")) } - #' Get a repo. #' #' Returns the repo with the given repo ID. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param repo_id Required. The ID for the corresponding repo to access. #' @@ -59,14 +48,10 @@ reposGet <- function(client, repo_id) { client$do("GET", paste("/api/2.0/repos/", repo_id, sep = "")) } - #' Get repo permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param repo_id Required. The repo for which to get or manage permissions. #' @@ -77,15 +62,11 @@ reposGetPermissionLevels <- function(client, repo_id) { client$do("GET", paste("/api/2.0/permissions/repos/", repo_id, "/permissionLevels", , sep = "")) } - #' Get repo permissions. #' #' Gets the permissions of a repo. Repos can inherit permissions from their root #' object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param repo_id Required. The repo for which to get or manage permissions. #' @@ -95,14 +76,11 @@ reposGetPermissions <- function(client, repo_id) { client$do("GET", paste("/api/2.0/permissions/repos/", repo_id, sep = "")) } - #' Get repos. #' #' Returns repos that the calling user has Manage permissions on. Results are #' paginated with each page containing twenty repos. #' @param client Required. Instance of DatabricksClient() - - #' #' @param next_page_token Token used to get the next page of results. #' @param path_prefix Filters repos that have paths starting with the given path prefix. @@ -130,15 +108,11 @@ reposList <- function(client, next_page_token = NULL, path_prefix = NULL) { return(results) } - #' Set repo permissions. #' #' Sets permissions on a repo. Repos can inherit permissions from their root #' object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param repo_id Required. The repo for which to get or manage permissions. @@ -149,15 +123,11 @@ reposSetPermissions <- function(client, repo_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/repos/", repo_id, sep = ""), body = body) } - #' Update a repo. #' #' Updates the repo to a different branch or tag, or updates the repo to the #' latest commit on the same branch. #' @param client Required. 
Instance of DatabricksClient() - - -#' #' #' @param branch Branch that the local version of the repo is checked out to. #' @param repo_id Required. The ID for the corresponding repo to access. @@ -170,15 +140,11 @@ reposUpdate <- function(client, repo_id, branch = NULL, sparse_checkout = NULL, body <- list(branch = branch, sparse_checkout = sparse_checkout, tag = tag) client$do("PATCH", paste("/api/2.0/repos/", repo_id, sep = ""), body = body) } - #' Update repo permissions. #' #' Updates the permissions on a repo. Repos can inherit permissions from their #' root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param repo_id Required. The repo for which to get or manage permissions. @@ -190,3 +156,11 @@ reposUpdatePermissions <- function(client, repo_id, access_control_list = NULL) client$do("PATCH", paste("/api/2.0/permissions/repos/", repo_id, sep = ""), body = body) } + + + + + + + + diff --git a/R/restrict_workspace_admins.R b/R/restrict_workspace_admins.R index dd7ab990..5db17299 100755 --- a/R/restrict_workspace_admins.R +++ b/R/restrict_workspace_admins.R @@ -12,8 +12,6 @@ NULL #' 409 and the request must be retried by using the fresh etag in the 409 #' response. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. #' @@ -24,13 +22,10 @@ restrictWorkspaceAdminsDelete <- function(client, etag = NULL) { client$do("DELETE", "/api/2.0/settings/types/restrict_workspace_admins/names/default", query = query) } - #' Get the restrict workspace admins setting. #' #' Gets the restrict workspace admins setting. #' @param client Required. Instance of DatabricksClient() - - #' #' @param etag etag used for versioning. #' @@ -41,7 +36,6 @@ restrictWorkspaceAdminsGet <- function(client, etag = NULL) { client$do("GET", "/api/2.0/settings/types/restrict_workspace_admins/names/default", query = query) } - #' Update the restrict workspace admins setting. #' #' Updates the restrict workspace admins setting for the workspace. A fresh etag @@ -50,9 +44,6 @@ restrictWorkspaceAdminsGet <- function(client, etag = NULL) { #' the setting is updated concurrently, `PATCH` fails with 409 and the request #' must be retried by using the fresh etag in the 409 response. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param allow_missing Required. This should always be set to true for Settings API. #' @param field_mask Required. Field mask is required to be passed into the PATCH request. @@ -66,3 +57,5 @@ restrictWorkspaceAdminsUpdate <- function(client, allow_missing, setting, field_ body = body) } + + diff --git a/R/schemas.R b/R/schemas.R index bf0b997b..3d732388 100755 --- a/R/schemas.R +++ b/R/schemas.R @@ -9,9 +9,6 @@ NULL #' metastore admin, or have the **CREATE_SCHEMA** privilege in the parent #' catalog. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. Name of parent catalog. #' @param comment User-provided free-form text description. @@ -27,15 +24,11 @@ schemasCreate <- function(client, name, catalog_name, comment = NULL, properties storage_root = storage_root) client$do("POST", "/api/2.1/unity-catalog/schemas", body = body) } - #' Delete a schema. #' #' Deletes the specified schema from the parent catalog. The caller must be the #' owner of the schema or an owner of the parent catalog. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. 
Full name of the schema. #' @@ -45,16 +38,12 @@ schemasDelete <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/schemas/", full_name, sep = "")) } - #' Get a schema. #' #' Gets the specified schema within the metastore. The caller must be a #' metastore admin, the owner of the schema, or a user that has the #' **USE_SCHEMA** privilege on the schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the schema. #' @param include_browse Whether to include schemas in the response for which the principal can only access selective metadata for. @@ -66,7 +55,6 @@ schemasGet <- function(client, full_name, include_browse = NULL) { client$do("GET", paste("/api/2.1/unity-catalog/schemas/", full_name, sep = ""), query = query) } - #' List schemas. #' #' Gets an array of schemas for a catalog in the metastore. If the caller is the @@ -75,9 +63,6 @@ schemasGet <- function(client, full_name, include_browse = NULL) { #' for which the caller has the **USE_SCHEMA** privilege) will be retrieved. #' There is no guarantee of a specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. Parent catalog for schemas of interest. #' @param include_browse Whether to include schemas in the response for which the principal can only access selective metadata for. @@ -109,7 +94,6 @@ schemasList <- function(client, catalog_name, include_browse = NULL, max_results return(results) } - #' Update a schema. #' #' Updates a schema for a catalog. The caller must be the owner of the schema or @@ -118,9 +102,6 @@ schemasList <- function(client, catalog_name, include_browse = NULL, max_results #' the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege #' on the parent catalog. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param enable_predictive_optimization Whether predictive optimization should be enabled for this object and objects under it. @@ -139,3 +120,7 @@ schemasUpdate <- function(client, full_name, comment = NULL, enable_predictive_o body = body) } + + + + diff --git a/R/secrets.R b/R/secrets.R index 0c143e97..feb693e4 100755 --- a/R/secrets.R +++ b/R/secrets.R @@ -8,9 +8,6 @@ NULL #' The scope name must consist of alphanumeric characters, dashes, underscores, #' and periods, and may not exceed 128 characters. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param backend_azure_keyvault The metadata for the secret scope if the type is `AZURE_KEYVAULT`. #' @param initial_manage_principal The principal that is initially granted `MANAGE` permission to the created scope. @@ -25,7 +22,6 @@ secretsCreateScope <- function(client, scope, backend_azure_keyvault = NULL, ini scope = scope, scope_backend_type = scope_backend_type) client$do("POST", "/api/2.0/secrets/scopes/create", body = body) } - #' Delete an ACL. #' #' Deletes the given ACL on the given scope. @@ -35,9 +31,6 @@ secretsCreateScope <- function(client, scope, backend_azure_keyvault = NULL, ini #' Throws `PERMISSION_DENIED` if the user does not have permission to make this #' API call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param principal Required. The principal to remove an existing ACL from. #' @param scope Required. The name of the scope to remove permissions from. 
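A sketch of the schema lifecycle covered above, with placeholder catalog and schema names:

library(databricks)

client <- DatabricksClient()

schemasCreate(client,
  name = "example_schema",
  catalog_name = "main",
  comment = "created from the R SDK")

# Updates and deletes address the schema by its full catalog.schema name.
schemasUpdate(client, full_name = "main.example_schema",
  comment = "updated comment")

schemasDelete(client, full_name = "main.example_schema")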
@@ -48,7 +41,6 @@ secretsDeleteAcl <- function(client, scope, principal) { body <- list(principal = principal, scope = scope) client$do("POST", "/api/2.0/secrets/acls/delete", body = body) } - #' Delete a secret scope. #' #' Deletes a secret scope. @@ -57,9 +49,6 @@ secretsDeleteAcl <- function(client, scope, principal) { #' `PERMISSION_DENIED` if the user does not have permission to make this API #' call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param scope Required. Name of the scope to delete. #' @@ -69,7 +58,6 @@ secretsDeleteScope <- function(client, scope) { body <- list(scope = scope) client$do("POST", "/api/2.0/secrets/scopes/delete", body = body) } - #' Delete a secret. #' #' Deletes the secret stored in this secret scope. You must have `WRITE` or @@ -79,9 +67,6 @@ secretsDeleteScope <- function(client, scope) { #' Throws `PERMISSION_DENIED` if the user does not have permission to make this #' API call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. Name of the secret to delete. #' @param scope Required. The name of the scope that contains the secret to delete. @@ -92,7 +77,6 @@ secretsDeleteSecret <- function(client, scope, key) { body <- list(key = key, scope = scope) client$do("POST", "/api/2.0/secrets/delete", body = body) } - #' Get secret ACL details. #' #' Gets the details about the given ACL, such as the group and permission. Users @@ -102,9 +86,6 @@ secretsDeleteSecret <- function(client, scope, key) { #' `PERMISSION_DENIED` if the user does not have permission to make this API #' call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param principal Required. The principal to fetch ACL information for. #' @param scope Required. The name of the scope to fetch ACL information from. @@ -115,7 +96,6 @@ secretsGetAcl <- function(client, scope, principal) { query <- list(principal = principal, scope = scope) client$do("GET", "/api/2.0/secrets/acls/get", query = query) } - #' Get a secret. #' #' Gets the bytes representation of a secret value for the specified scope and @@ -131,9 +111,6 @@ secretsGetAcl <- function(client, scope, principal) { #' this API call. Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret or secret #' scope exists. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param key Required. The key to fetch secret for. #' @param scope Required. The name of the scope to fetch secret information from. @@ -144,7 +121,6 @@ secretsGetSecret <- function(client, scope, key) { query <- list(key = key, scope = scope) client$do("GET", "/api/2.0/secrets/get", query = query) } - #' Lists ACLs. #' #' List the ACLs for a given secret scope. Users must have the `MANAGE` @@ -154,9 +130,6 @@ secretsGetSecret <- function(client, scope, key) { #' `PERMISSION_DENIED` if the user does not have permission to make this API #' call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param scope Required. The name of the scope to fetch ACL information from. #' @@ -171,7 +144,6 @@ secretsListAcls <- function(client, scope) { return(json$items) } - #' List all scopes. #' #' Lists all secret scopes available in the workspace. @@ -190,7 +162,6 @@ secretsListScopes <- function(client) { return(json$scopes) } - #' List secret keys. #' #' Lists the secret keys that are stored at this scope. 
This is a metadata-only @@ -202,9 +173,6 @@ secretsListScopes <- function(client) { #' `PERMISSION_DENIED` if the user does not have permission to make this API #' call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param scope Required. The name of the scope to list secrets within. #' @@ -219,7 +187,6 @@ secretsListSecrets <- function(client, scope) { return(json$secrets) } - #' Create/update an ACL. #' #' Creates or overwrites the Access Control List (ACL) associated with the given @@ -249,9 +216,6 @@ secretsListSecrets <- function(client, scope) { #' Throws `PERMISSION_DENIED` if the user does not have permission to make this #' API call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param permission Required. The permission level applied to the principal. #' @param principal Required. The principal in which the permission is applied. @@ -263,7 +227,6 @@ secretsPutAcl <- function(client, scope, principal, permission) { body <- list(permission = permission, principal = principal, scope = scope) client$do("POST", "/api/2.0/secrets/acls/put", body = body) } - #' Add a secret. #' #' Inserts a secret under the provided scope with the given name. If a secret @@ -286,9 +249,6 @@ secretsPutAcl <- function(client, scope, principal, permission) { #' Throws `PERMISSION_DENIED` if the user does not have permission to make this #' API call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param bytes_value If specified, value will be stored as bytes. #' @param key Required. A unique name to identify the secret. @@ -302,3 +262,13 @@ secretsPutSecret <- function(client, scope, key, bytes_value = NULL, string_valu client$do("POST", "/api/2.0/secrets/put", body = body) } + + + + + + + + + + diff --git a/R/service_principals.R b/R/service_principals.R index d78fa8e7..15d56754 100755 --- a/R/service_principals.R +++ b/R/service_principals.R @@ -7,8 +7,6 @@ NULL #' #' Creates a new service principal in the Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - #' #' @param active If this user is active. #' @param application_id UUID relating to the service principal. @@ -30,14 +28,10 @@ servicePrincipalsCreate <- function(client, active = NULL, application_id = NULL roles = roles, schemas = schemas) client$do("POST", "/api/2.0/preview/scim/v2/ServicePrincipals", body = body) } - #' Delete a service principal. #' #' Delete a single service principal in the Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a service principal in the Databricks workspace. #' @@ -48,15 +42,11 @@ servicePrincipalsDelete <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/scim/v2/ServicePrincipals/", id, sep = "")) } - #' Get service principal details. #' #' Gets the details for a single service principal define in the Databricks #' workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a service principal in the Databricks workspace. #' @@ -66,13 +56,10 @@ servicePrincipalsGet <- function(client, id) { client$do("GET", paste("/api/2.0/preview/scim/v2/ServicePrincipals/", id, sep = "")) } - #' List service principals. #' #' Gets the set of service principals associated with a Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - #' #' @param attributes Comma-separated list of attributes to return in response. 
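Putting the secrets endpoints above together, a sketch of a scope holding one secret and one ACL; the scope, key, value, and principal are placeholders, and the principal must already exist in the workspace:

library(databricks)

client <- DatabricksClient()

secretsCreateScope(client, scope = "example-scope")

# Store a string secret under the scope.
secretsPutSecret(client, scope = "example-scope", key = "api-token",
  string_value = "s3cr3t")

# Grant READ on the scope to a hypothetical group, then inspect the ACLs.
secretsPutAcl(client, scope = "example-scope", principal = "data-scientists",
  permission = "READ")
secretsListAcls(client, scope = "example-scope")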
#' @param count Desired number of results per page. @@ -107,15 +94,11 @@ servicePrincipalsList <- function(client, attributes = NULL, count = NULL, exclu return(results) } - #' Update service principal details. #' #' Partially updates the details of a single service principal in the Databricks #' workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a service principal in the Databricks workspace. #' @param operations This field has no description yet. @@ -128,16 +111,12 @@ servicePrincipalsPatch <- function(client, id, operations = NULL, schemas = NULL client$do("PATCH", paste("/api/2.0/preview/scim/v2/ServicePrincipals/", id, sep = ""), body = body) } - #' Replace service principal. #' #' Updates the details of a single service principal. #' #' This action replaces the existing service principal with the same name. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param active If this user is active. #' @param application_id UUID relating to the service principal. @@ -161,3 +140,8 @@ servicePrincipalsUpdate <- function(client, id, active = NULL, application_id = body = body) } + + + + + diff --git a/R/serving_endpoints.R b/R/serving_endpoints.R index 562c0e57..c05d1862 100755 --- a/R/serving_endpoints.R +++ b/R/serving_endpoints.R @@ -7,9 +7,6 @@ NULL #' #' Retrieves the build logs associated with the provided served model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the serving endpoint that the served model belongs to. #' @param served_model_name Required. The name of the served model that build logs will be retrieved for. @@ -21,20 +18,8 @@ servingEndpointsBuildLogs <- function(client, name, served_model_name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, "/served-models/", served_model_name, "/build-logs", , sep = "")) } - #' Create a new serving endpoint. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Serving Endpoints on Databricks reach -#' NOT_UPDATING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param config Required. The core config of the serving endpoint. #' @param name Required. The name of the serving endpoint. @@ -43,52 +28,12 @@ servingEndpointsBuildLogs <- function(client, name, served_model_name) { #' #' @rdname servingEndpointsCreate #' @export -servingEndpointsCreate <- function(client, name, config, rate_limits = NULL, tags = NULL, - timeout = 20, callback = cli_reporter) { +servingEndpointsCreate <- function(client, name, config, rate_limits = NULL, tags = NULL) { body <- list(config = config, name = name, rate_limits = rate_limits, tags = tags) - op_response <- client$do("POST", "/api/2.0/serving-endpoints", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("NOT_UPDATING", c()) - failure_states <- c("UPDATE_FAILED", c()) - status_message <- "polling..." 
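Serving endpoints get the same split as pipelines: the polling loop being deleted here reappears below as servingEndpointsCreateAndWait (and servingEndpointsUpdateConfigAndWait). A sketch of both call styles; the endpoint names are placeholders, and the config fields follow the serving endpoints REST payload, so treat them as illustrative:

library(databricks)

client <- DatabricksClient()

config <- list(served_entities = list(list(
  entity_name = "main.default.example_model",
  entity_version = "1",
  workload_size = "Small",
  scale_to_zero_enabled = TRUE)))

# Fire-and-forget: returns once the create request is accepted.
servingEndpointsCreate(client, name = "example-endpoint", config = config)

# Blocking variant: polls until the endpoint reaches NOT_UPDATING or the
# default 20-minute timeout elapses.
endpoint <- servingEndpointsCreateAndWait(client, name = "example-endpoint-2",
  config = config)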
- attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- servingEndpointsGet(client, name = op_response$name) - status <- poll$state$config_update - status_message <- paste("current status:", status) - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach NOT_UPDATING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::servingEndpointsGet(name=", op_response$name, - ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/serving-endpoints", body = body) } - #' Delete a serving endpoint. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the serving endpoint. #' @@ -98,15 +43,11 @@ servingEndpointsDelete <- function(client, name) { client$do("DELETE", paste("/api/2.0/serving-endpoints/", name, sep = "")) } - #' Get metrics of a serving endpoint. #' #' Retrieves the metrics associated with the provided serving endpoint in either #' Prometheus or OpenMetrics exposition format. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the serving endpoint to retrieve metrics for. #' @@ -116,14 +57,10 @@ servingEndpointsExportMetrics <- function(client, name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, "/metrics", , sep = "")) } - #' Get a single serving endpoint. #' #' Retrieves the details for a single serving endpoint. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the serving endpoint. #' @@ -133,14 +70,10 @@ servingEndpointsGet <- function(client, name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, sep = "")) } - #' Get serving endpoint permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. #' @@ -151,15 +84,11 @@ servingEndpointsGetPermissionLevels <- function(client, serving_endpoint_id) { client$do("GET", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, "/permissionLevels", , sep = "")) } - #' Get serving endpoint permissions. #' #' Gets the permissions of a serving endpoint. Serving endpoints can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. #' @@ -170,7 +99,6 @@ servingEndpointsGetPermissions <- function(client, serving_endpoint_id) { client$do("GET", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, sep = "")) } - #' Get all serving endpoints. #' @param client Required. Instance of DatabricksClient() #' @@ -184,14 +112,10 @@ servingEndpointsList <- function(client) { return(json$endpoints) } - #' Get the latest logs for a served model. 
#' #' Retrieves the service logs associated with the provided served model. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the serving endpoint that the served model belongs to. #' @param served_model_name Required. The name of the served model that logs will be retrieved for. @@ -203,15 +127,11 @@ servingEndpointsLogs <- function(client, name, served_model_name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, "/served-models/", served_model_name, "/logs", , sep = "")) } - #' Update tags of a serving endpoint. #' #' Used to batch add and delete tags from a serving endpoint with a single API #' call. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param add_tags List of endpoint tags to add. #' @param delete_tags List of tag keys to delete. @@ -224,15 +144,11 @@ servingEndpointsPatch <- function(client, name, add_tags = NULL, delete_tags = N client$do("PATCH", paste("/api/2.0/serving-endpoints/", name, "/tags", , sep = ""), body = body) } - #' Update rate limits of a serving endpoint. #' #' Used to update the rate limits of a serving endpoint. NOTE: only external and #' foundation model endpoints are supported as of now. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the serving endpoint whose rate limits are being updated. #' @param rate_limits The list of endpoint rate limits. @@ -244,12 +160,8 @@ servingEndpointsPut <- function(client, name, rate_limits = NULL) { client$do("PUT", paste("/api/2.0/serving-endpoints/", name, "/rate-limits", , sep = ""), body = body) } - #' Query a serving endpoint. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param dataframe_records Pandas Dataframe input in the records orientation. #' @param dataframe_split Pandas Dataframe input in the split orientation. @@ -278,15 +190,11 @@ servingEndpointsQuery <- function(client, name, dataframe_records = NULL, datafr client$do("POST", paste("/serving-endpoints/", name, "/invocations", , sep = ""), body = body) } - #' Set serving endpoint permissions. #' #' Sets permissions on a serving endpoint. Serving endpoints can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. @@ -298,7 +206,6 @@ servingEndpointsSetPermissions <- function(client, serving_endpoint_id, access_c client$do("PUT", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, sep = ""), body = body) } - #' Update config of a serving endpoint. #' #' Updates any combination of the serving endpoint's served entities, the @@ -306,6 +213,41 @@ servingEndpointsSetPermissions <- function(client, serving_endpoint_id, access_c #' config. An endpoint that already has an update in progress can not be updated #' until the current update completes or fails. #' @param client Required. Instance of DatabricksClient() +#' +#' @param auto_capture_config Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog. +#' @param name Required. The name of the serving endpoint to update. +#' @param served_entities A list of served entities for the endpoint to serve. +#' @param served_models (Deprecated, use served_entities instead) A list of served models for the endpoint to serve. 
+#' @param traffic_config The traffic config defining how invocations to the serving endpoint should be routed. +#' +#' @rdname servingEndpointsUpdateConfig +#' @export +servingEndpointsUpdateConfig <- function(client, name, auto_capture_config = NULL, + served_entities = NULL, served_models = NULL, traffic_config = NULL) { + body <- list(auto_capture_config = auto_capture_config, served_entities = served_entities, + served_models = served_models, traffic_config = traffic_config) + client$do("PUT", paste("/api/2.0/serving-endpoints/", name, "/config", , sep = ""), + body = body) +} +#' Update serving endpoint permissions. +#' +#' Updates the permissions on a serving endpoint. Serving endpoints can inherit +#' permissions from their root object. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param access_control_list This field has no description yet. +#' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. +#' +#' @rdname servingEndpointsUpdatePermissions +#' @export +servingEndpointsUpdatePermissions <- function(client, serving_endpoint_id, access_control_list = NULL) { + body <- list(access_control_list = access_control_list) + client$do("PATCH", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, + sep = ""), body = body) +} + +#' Create a new serving endpoint. +#' @param client Required. Instance of DatabricksClient() #' #' @description @@ -315,8 +257,82 @@ servingEndpointsSetPermissions <- function(client, serving_endpoint_id, access_c #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param config Required. The core config of the serving endpoint. +#' @param name Required. The name of the serving endpoint. +#' @param rate_limits Rate limits to be applied to the serving endpoint. +#' @param tags Tags to be attached to the serving endpoint and automatically propagated to billing logs. +#' +#' @rdname servingEndpointsCreateAndWait +#' @export +servingEndpointsCreateAndWait <- function(client, name, config, rate_limits = NULL, + tags = NULL, timeout = 20, callback = cli_reporter) { + body <- list(config = config, name = name, rate_limits = rate_limits, tags = tags) + op_response <- client$do("POST", "/api/2.0/serving-endpoints", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("NOT_UPDATING", c()) + failure_states <- c("UPDATE_FAILED", c()) + status_message <- "polling..." 
+ attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- servingEndpointsGet(client, name = op_response$name) + status <- poll$state$config_update + status_message <- paste("current status:", status) + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach NOT_UPDATING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::servingEndpointsGet(name=", op_response$name, + ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + + + + + + + + + + + + +#' Update config of a serving endpoint. +#' +#' Updates any combination of the serving endpoint's served entities, the +#' compute configuration of those served entities, and the endpoint's traffic +#' config. An endpoint that already has an update in progress can not be updated +#' until the current update completes or fails. +#' @param client Required. Instance of DatabricksClient() #' +#' @description +#' This is a long-running operation, which blocks until Serving Endpoints on Databricks reach +#' NOT_UPDATING state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. #' #' @param auto_capture_config Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog. #' @param name Required. The name of the serving endpoint to update. @@ -324,9 +340,9 @@ servingEndpointsSetPermissions <- function(client, serving_endpoint_id, access_c #' @param served_models (Deprecated, use served_entities instead) A list of served models for the endpoint to serve. #' @param traffic_config The traffic config defining how invocations to the serving endpoint should be routed. #' -#' @rdname servingEndpointsUpdateConfig +#' @rdname servingEndpointsUpdateConfigAndWait #' @export -servingEndpointsUpdateConfig <- function(client, name, auto_capture_config = NULL, +servingEndpointsUpdateConfigAndWait <- function(client, name, auto_capture_config = NULL, served_entities = NULL, served_models = NULL, traffic_config = NULL, timeout = 20, callback = cli_reporter) { body <- list(auto_capture_config = auto_capture_config, served_entities = served_entities, @@ -370,23 +386,4 @@ servingEndpointsUpdateConfig <- function(client, name, auto_capture_config = NUL rlang::abort(msg, call = rlang::caller_env()) } -#' Update serving endpoint permissions. -#' -#' Updates the permissions on a serving endpoint. Serving endpoints can inherit -#' permissions from their root object. -#' @param client Required. Instance of DatabricksClient() - - -#' -#' -#' @param access_control_list This field has no description yet. -#' @param serving_endpoint_id Required. 
The serving endpoint for which to get or manage permissions. -#' -#' @rdname servingEndpointsUpdatePermissions -#' @export -servingEndpointsUpdatePermissions <- function(client, serving_endpoint_id, access_control_list = NULL) { - body <- list(access_control_list = access_control_list) - client$do("PATCH", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, - sep = ""), body = body) -} diff --git a/R/shares.R b/R/shares.R index 7e478fb9..422a47a5 100755 --- a/R/shares.R +++ b/R/shares.R @@ -9,9 +9,6 @@ NULL #' creation with **update**. The caller must be a metastore admin or have the #' **CREATE_SHARE** privilege on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param name Required. Name of the share. @@ -22,15 +19,11 @@ sharesCreate <- function(client, name, comment = NULL) { body <- list(comment = comment, name = name) client$do("POST", "/api/2.1/unity-catalog/shares", body = body) } - #' Delete a share. #' #' Deletes a data object share from the metastore. The caller must be an owner #' of the share. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the share. #' @@ -40,15 +33,11 @@ sharesDelete <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/shares/", name, sep = "")) } - #' Get a share. #' #' Gets a data object share from the metastore. The caller must be a metastore #' admin or the owner of the share. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param include_shared_data Query for data to include in the share. #' @param name Required. The name of the share. @@ -59,7 +48,6 @@ sharesGet <- function(client, name, include_shared_data = NULL) { query <- list(include_shared_data = include_shared_data) client$do("GET", paste("/api/2.1/unity-catalog/shares/", name, sep = ""), query = query) } - #' List shares. #' #' Gets an array of data object shares from the metastore. The caller must be a @@ -77,15 +65,11 @@ sharesList <- function(client) { return(json$shares) } - #' Get permissions. #' #' Gets the permissions for a data share from the metastore. The caller must be #' a metastore admin or the owner of the share. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the share. #' @@ -96,7 +80,6 @@ sharesSharePermissions <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/shares/", name, "/permissions", , sep = "")) } - #' Update a share. #' #' Updates the share with the changes and data objects in the request. The @@ -115,9 +98,6 @@ sharesSharePermissions <- function(client, name) { #' #' Table removals through **update** do not require additional privileges. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment User-provided free-form text description. #' @param name Required. The name of the share. @@ -132,7 +112,6 @@ sharesUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = body <- list(comment = comment, new_name = new_name, owner = owner, updates = updates) client$do("PATCH", paste("/api/2.1/unity-catalog/shares/", name, sep = ""), body = body) } - #' Update permissions. #' #' Updates the permissions for a data share in the metastore. 
The caller must be @@ -141,9 +120,6 @@ sharesUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = #' For new recipient grants, the user must also be the owner of the recipients. #' recipient revocations do not require additional privileges. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param changes Array of permission changes. #' @param name Required. The name of the share. @@ -156,3 +132,9 @@ sharesUpdatePermissions <- function(client, name, changes = NULL) { , sep = ""), body = body) } + + + + + + diff --git a/R/statement_execution.R b/R/statement_execution.R index 1bf8b5ec..40b8177d 100755 --- a/R/statement_execution.R +++ b/R/statement_execution.R @@ -8,9 +8,6 @@ NULL #' Requests that an executing statement be canceled. Callers must poll for #' status to see the terminal state. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param statement_id Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. #' @@ -21,12 +18,8 @@ statementExecutionCancelExecution <- function(client, statement_id) { client$do("POST", paste("/api/2.0/sql/statements/", statement_id, "/cancel", , sep = "")) } - #' Execute a SQL statement. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param byte_limit Applies the given byte limit to the statement's result size. #' @param catalog Sets default catalog for statement execution, similar to [`USE CATALOG`](https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html) in SQL. @@ -51,7 +44,6 @@ statementExecutionExecuteStatement <- function(client, statement, warehouse_id, warehouse_id = warehouse_id) client$do("POST", "/api/2.0/sql/statements/", body = body) } - #' Get status, manifest, and result first chunk. #' #' This request can be used to poll for the statement's status. When the @@ -64,9 +56,6 @@ statementExecutionExecuteStatement <- function(client, statement, warehouse_id, #' **NOTE** This call currently might take up to 5 seconds to get the latest #' status and result. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param statement_id Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. #' @@ -76,7 +65,6 @@ statementExecutionGetStatement <- function(client, statement_id) { client$do("GET", paste("/api/2.0/sql/statements/", statement_id, sep = "")) } - #' Get result chunk by index. #' #' After the statement execution has `SUCCEEDED`, this request can be used to @@ -88,9 +76,6 @@ statementExecutionGetStatement <- function(client, statement_id) { #' similarly includes the `next_chunk_index` and `next_chunk_internal_link` #' fields for simple iteration through the result set. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param chunk_index Required. This field has no description yet. #' @param statement_id Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. @@ -103,3 +88,6 @@ statementExecutionGetStatementResultChunkN <- function(client, statement_id, chu chunk_index, sep = "")) } + + + diff --git a/R/storage_credentials.R b/R/storage_credentials.R index da37aa43..59da4547 100755 --- a/R/storage_credentials.R +++ b/R/storage_credentials.R @@ -7,9 +7,6 @@ NULL #' #' Creates a new storage credential. #' @param client Required. 
Instance of DatabricksClient() - - -#' #' #' @param aws_iam_role The AWS IAM role configuration. #' @param azure_managed_identity The Azure managed identity configuration. @@ -32,15 +29,11 @@ storageCredentialsCreate <- function(client, name, aws_iam_role = NULL, azure_ma name = name, read_only = read_only, skip_validation = skip_validation) client$do("POST", "/api/2.1/unity-catalog/storage-credentials", body = body) } - #' Delete a credential. #' #' Deletes a storage credential from the metastore. The caller must be an owner #' of the storage credential. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param force Force deletion even if there are dependent external locations or external tables. #' @param name Required. Name of the storage credential. @@ -52,16 +45,12 @@ storageCredentialsDelete <- function(client, name, force = NULL) { client$do("DELETE", paste("/api/2.1/unity-catalog/storage-credentials/", name, sep = ""), query = query) } - #' Get a credential. #' #' Gets a storage credential from the metastore. The caller must be a metastore #' admin, the owner of the storage credential, or have some permission on the #' storage credential. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. Name of the storage credential. #' @@ -71,7 +60,6 @@ storageCredentialsGet <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/storage-credentials/", name, sep = "")) } - #' List credentials. #' #' Gets an array of storage credentials (as __StorageCredentialInfo__ objects). @@ -80,8 +68,6 @@ storageCredentialsGet <- function(client, name) { #' credentials is unrestricted. There is no guarantee of a specific ordering of #' the elements in the array. #' @param client Required. Instance of DatabricksClient() - - #' #' @param max_results Maximum number of storage credentials to return. #' @param page_token Opaque pagination token to go to next page based on previous query. @@ -109,14 +95,10 @@ storageCredentialsList <- function(client, max_results = NULL, page_token = NULL return(results) } - #' Update a credential. #' #' Updates a storage credential on the metastore. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param aws_iam_role The AWS IAM role configuration. #' @param azure_managed_identity The Azure managed identity configuration. @@ -145,7 +127,6 @@ storageCredentialsUpdate <- function(client, name, aws_iam_role = NULL, azure_ma client$do("PATCH", paste("/api/2.1/unity-catalog/storage-credentials/", name, sep = ""), body = body) } - #' Validate a storage credential. #' #' Validates a storage credential. At least one of __external_location_name__ @@ -161,8 +142,6 @@ storageCredentialsUpdate <- function(client, name, aws_iam_role = NULL, azure_ma #' the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage #' credential. #' @param client Required. Instance of DatabricksClient() - - #' #' @param aws_iam_role The AWS IAM role configuration. #' @param azure_managed_identity The Azure managed identity configuration. @@ -188,3 +167,8 @@ storageCredentialsValidate <- function(client, aws_iam_role = NULL, azure_manage client$do("POST", "/api/2.1/unity-catalog/validate-storage-credentials", body = body) } + + + + + diff --git a/R/system_schemas.R b/R/system_schemas.R index ed6a0561..fdc69ca3 100755 --- a/R/system_schemas.R +++ b/R/system_schemas.R @@ -8,9 +8,6 @@ NULL #' Disables the system schema and removes it from the system catalog. 
The caller #' must be an account admin or a metastore admin. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param metastore_id Required. The metastore ID under which the system schema lives. #' @param schema_name Required. Full name of the system schema. @@ -22,15 +19,11 @@ systemSchemasDisable <- function(client, metastore_id, schema_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/metastores/", metastore_id, "/systemschemas/", schema_name, sep = "")) } - #' Enable a system schema. #' #' Enables the system schema and adds it to the system catalog. The caller must #' be an account admin or a metastore admin. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param metastore_id Required. The metastore ID under which the system schema lives. #' @param schema_name Required. Full name of the system schema. @@ -42,15 +35,11 @@ systemSchemasEnable <- function(client, metastore_id, schema_name) { client$do("PUT", paste("/api/2.1/unity-catalog/metastores/", metastore_id, "/systemschemas/", schema_name, sep = "")) } - #' List system schemas. #' #' Gets an array of system schemas for a metastore. The caller must be an #' account admin or a metastore admin. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param metastore_id Required. The ID for the metastore in which the system schema resides. #' @@ -67,3 +56,5 @@ systemSchemasList <- function(client, metastore_id) { } + + diff --git a/R/table_constraints.R b/R/table_constraints.R index c0284f1d..782053f3 100755 --- a/R/table_constraints.R +++ b/R/table_constraints.R @@ -16,9 +16,6 @@ NULL #' referenced parent table's schema, and be the owner of the referenced parent #' table. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param constraint Required. A table constraint, as defined by *one* of the following fields being set: __primary_key_constraint__, __foreign_key_constraint__, __named_table_constraint__. #' @param full_name_arg Required. The full name of the table referenced by the constraint. @@ -29,7 +26,6 @@ tableConstraintsCreate <- function(client, full_name_arg, constraint) { body <- list(constraint = constraint, full_name_arg = full_name_arg) client$do("POST", "/api/2.1/unity-catalog/constraints", body = body) } - #' Delete a table constraint. #' #' Deletes a table constraint. @@ -42,9 +38,6 @@ tableConstraintsCreate <- function(client, full_name_arg, constraint) { #' **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** #' privilege on the table's schema, and be the owner of the table. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param cascade Required. If true, try deleting all child constraints of the current constraint. #' @param constraint_name Required. The name of the constraint to delete. @@ -58,3 +51,4 @@ tableConstraintsDelete <- function(client, full_name, constraint_name, cascade) query = query) } + diff --git a/R/tables.R b/R/tables.R index cfbd5d2e..c4b7de68 100755 --- a/R/tables.R +++ b/R/tables.R @@ -11,9 +11,6 @@ NULL #' table and have the **USE_CATALOG** privilege on the parent catalog and the #' **USE_SCHEMA** privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @@ -23,7 +20,6 @@ tablesDelete <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/tables/", full_name, sep = "")) } - #' Get boolean reflecting if table exists. 
#' #' Gets if a table exists in the metastore for a specific catalog and schema. @@ -35,9 +31,6 @@ tablesDelete <- function(client, full_name) { #' SELECT privilege on the table. * Have BROWSE privilege on the parent catalog #' * Have BROWSE privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @@ -48,7 +41,6 @@ tablesExists <- function(client, full_name) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/exists", , sep = "")) } - #' Get a table. #' #' Gets a table from the metastore for a specific catalog and schema. The caller @@ -59,9 +51,6 @@ tablesExists <- function(client, full_name) { #' parent schema, and either be the table owner or have the SELECT privilege on #' the table. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @param include_browse Whether to include tables in the response for which the principal can only access selective metadata for. @@ -74,7 +63,6 @@ tablesGet <- function(client, full_name, include_browse = NULL, include_delta_me client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, sep = ""), query = query) } - #' List tables. #' #' Gets an array of all tables for the current metastore under the parent @@ -84,9 +72,6 @@ tablesGet <- function(client, full_name, include_browse = NULL, include_delta_me #' catalog and the **USE_SCHEMA** privilege on the parent schema. There is no #' guarantee of a specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. Name of parent catalog for tables of interest. #' @param include_browse Whether to include tables in the response for which the principal can only access selective metadata for. @@ -124,7 +109,6 @@ tablesList <- function(client, catalog_name, schema_name, include_browse = NULL, return(results) } - #' List table summaries. #' #' Gets an array of summaries for tables for a schema and catalog within the @@ -139,9 +123,6 @@ tablesList <- function(client, catalog_name, schema_name, include_browse = NULL, #' #' There is no guarantee of a specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. Name of parent catalog for tables of interest. #' @param max_results Maximum number of summaries for tables to return. @@ -174,7 +155,6 @@ tablesListSummaries <- function(client, catalog_name, max_results = NULL, page_t return(results) } - #' Update a table owner. #' #' Change the owner of the table. The caller must be the owner of the parent @@ -183,9 +163,6 @@ tablesListSummaries <- function(client, catalog_name, max_results = NULL, page_t #' **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** #' privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param full_name Required. Full name of the table. #' @param owner This field has no description yet. @@ -198,3 +175,8 @@ tablesUpdate <- function(client, full_name, owner = NULL) { body = body) } + + + + + diff --git a/R/token_management.R b/R/token_management.R index d7f923e0..11aeba10 100755 --- a/R/token_management.R +++ b/R/token_management.R @@ -7,9 +7,6 @@ NULL #' #' Creates a token on behalf of a service principal. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param application_id Required. 
Application ID of the service principal. #' @param comment Comment that describes the purpose of the token. @@ -22,14 +19,10 @@ tokenManagementCreateOboToken <- function(client, application_id, comment = NULL body <- list(application_id = application_id, comment = comment, lifetime_seconds = lifetime_seconds) client$do("POST", "/api/2.0/token-management/on-behalf-of/tokens", body = body) } - #' Delete a token. #' #' Deletes a token, specified by its ID. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param token_id Required. The ID of the token to get. #' @@ -39,14 +32,10 @@ tokenManagementDelete <- function(client, token_id) { client$do("DELETE", paste("/api/2.0/token-management/tokens/", token_id, sep = "")) } - #' Get token info. #' #' Gets information about a token, specified by its ID. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param token_id Required. The ID of the token to get. #' @@ -56,7 +45,6 @@ tokenManagementGet <- function(client, token_id) { client$do("GET", paste("/api/2.0/token-management/tokens/", token_id, sep = "")) } - #' Get token permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -67,7 +55,6 @@ tokenManagementGet <- function(client, token_id) { tokenManagementGetPermissionLevels <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/tokens/permissionLevels") } - #' Get token permissions. #' #' Gets the permissions of all tokens. Tokens can inherit permissions from their @@ -79,13 +66,10 @@ tokenManagementGetPermissionLevels <- function(client) { tokenManagementGetPermissions <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/tokens") } - #' List all tokens. #' #' Lists all tokens associated with the specified workspace or user. #' @param client Required. Instance of DatabricksClient() - - #' #' @param created_by_id User ID of the user that created the token. #' @param created_by_username Username of the user that created the token. @@ -101,14 +85,11 @@ tokenManagementList <- function(client, created_by_id = NULL, created_by_usernam return(json$token_infos) } - #' Set token permissions. #' #' Sets permissions on all tokens. Tokens can inherit permissions from their #' root object. #' @param client Required. Instance of DatabricksClient() - - #' #' @param access_control_list This field has no description yet. #' @@ -118,14 +99,11 @@ tokenManagementSetPermissions <- function(client, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", "/api/2.0/permissions/authorization/tokens", body = body) } - #' Update token permissions. #' #' Updates the permissions on all tokens. Tokens can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() - - #' #' @param access_control_list This field has no description yet. #' @@ -136,3 +114,10 @@ tokenManagementUpdatePermissions <- function(client, access_control_list = NULL) client$do("PATCH", "/api/2.0/permissions/authorization/tokens", body = body) } + + + + + + + diff --git a/R/tokens.R b/R/tokens.R index d1bd4413..1a0eba02 100755 --- a/R/tokens.R +++ b/R/tokens.R @@ -10,8 +10,6 @@ NULL #' authenticated token. If the user's token quota is exceeded, this call returns #' an error **QUOTA_EXCEEDED**. #' @param client Required. Instance of DatabricksClient() - - #' #' @param comment Optional description to attach to the token. #' @param lifetime_seconds The lifetime of the token, in seconds. 
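# Illustrative usage (editorial sketch, not part of the patch): creating and
# revoking a personal access token with the wrappers above; DatabricksClient()
# is assumed to be configured from the environment, and token_info$token_id
# follows the REST response shape.
client <- DatabricksClient()
created <- tokensCreate(client, comment = "scratch token", lifetime_seconds = 3600)
tokensDelete(client, token_id = created$token_info$token_id)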
@@ -22,7 +20,6 @@ tokensCreate <- function(client, comment = NULL, lifetime_seconds = NULL) { body <- list(comment = comment, lifetime_seconds = lifetime_seconds) client$do("POST", "/api/2.0/token/create", body = body) } - #' Revoke token. #' #' Revokes an access token. @@ -30,9 +27,6 @@ tokensCreate <- function(client, comment = NULL, lifetime_seconds = NULL) { #' If a token with the specified ID is not valid, this call returns an error #' **RESOURCE_DOES_NOT_EXIST**. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param token_id Required. The ID of the token to be revoked. #' @@ -42,7 +36,6 @@ tokensDelete <- function(client, token_id) { body <- list(token_id = token_id) client$do("POST", "/api/2.0/token/delete", body = body) } - #' List tokens. #' #' Lists all the valid tokens for a user-workspace pair. @@ -59,3 +52,5 @@ tokensList <- function(client) { } + + diff --git a/R/users.R b/R/users.R index f2ef8028..d6d90c0b 100755 --- a/R/users.R +++ b/R/users.R @@ -8,8 +8,6 @@ NULL #' Creates a new user in the Databricks workspace. This new user will also be #' added to the Databricks account. #' @param client Required. Instance of DatabricksClient() - - #' #' @param active If this user is active. #' @param display_name String that represents a concatenation of given and family names. @@ -33,15 +31,11 @@ usersCreate <- function(client, active = NULL, display_name = NULL, emails = NUL schemas = schemas, userName = user_name) client$do("POST", "/api/2.0/preview/scim/v2/Users", body = body) } - #' Delete a user. #' #' Deletes a user. Deleting a user from a Databricks workspace also removes #' objects associated with the user. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a user in the Databricks workspace. #' @@ -51,14 +45,10 @@ usersDelete <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/scim/v2/Users/", id, sep = "")) } - #' Get user details. #' #' Gets information for a specific user in Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param attributes Comma-separated list of attributes to return in response. #' @param count Desired number of results per page. @@ -77,7 +67,6 @@ usersGet <- function(client, id, attributes = NULL, count = NULL, excluded_attri filter = filter, sortBy = sort_by, sortOrder = sort_order, startIndex = start_index) client$do("GET", paste("/api/2.0/preview/scim/v2/Users/", id, sep = ""), query = query) } - #' Get password permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -88,7 +77,6 @@ usersGet <- function(client, id, attributes = NULL, count = NULL, excluded_attri usersGetPermissionLevels <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/passwords/permissionLevels") } - #' Get password permissions. #' #' Gets the permissions of all passwords. Passwords can inherit permissions from @@ -100,13 +88,10 @@ usersGetPermissionLevels <- function(client) { usersGetPermissions <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/passwords") } - #' List users. #' #' Gets details for all the users associated with a Databricks workspace. #' @param client Required. Instance of DatabricksClient() - - #' #' @param attributes Comma-separated list of attributes to return in response. #' @param count Desired number of results per page. 
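# Illustrative usage (editorial sketch, not part of the patch): usersList()
# pages through the SCIM API and returns a data.frame covering all response
# pages; the attribute filter shown is an example value.
client <- DatabricksClient()
users <- usersList(client, attributes = "id,userName")
head(users)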
@@ -141,15 +126,11 @@ usersList <- function(client, attributes = NULL, count = NULL, excluded_attribut return(results) } - #' Update user details. #' #' Partially updates a user resource by applying the supplied operations on #' specific user attributes. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Unique ID for a user in the Databricks workspace. #' @param operations This field has no description yet. @@ -161,14 +142,11 @@ usersPatch <- function(client, id, operations = NULL, schemas = NULL) { body <- list(Operations = operations, schemas = schemas) client$do("PATCH", paste("/api/2.0/preview/scim/v2/Users/", id, sep = ""), body = body) } - #' Set password permissions. #' #' Sets permissions on all passwords. Passwords can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() - - #' #' @param access_control_list This field has no description yet. #' @@ -178,14 +156,10 @@ usersSetPermissions <- function(client, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", "/api/2.0/permissions/authorization/passwords", body = body) } - #' Replace a user. #' #' Replaces a user's information with the data supplied in request. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param active If this user is active. #' @param display_name String that represents a concatenation of given and family names. @@ -209,14 +183,11 @@ usersUpdate <- function(client, id, active = NULL, display_name = NULL, emails = schemas = schemas, userName = user_name) client$do("PUT", paste("/api/2.0/preview/scim/v2/Users/", id, sep = ""), body = body) } - #' Update password permissions. #' #' Updates the permissions on all passwords. Passwords can inherit permissions #' from their root object. #' @param client Required. Instance of DatabricksClient() - - #' #' @param access_control_list This field has no description yet. #' @@ -227,3 +198,12 @@ usersUpdatePermissions <- function(client, access_control_list = NULL) { client$do("PATCH", "/api/2.0/permissions/authorization/passwords", body = body) } + + + + + + + + + diff --git a/R/vector_search_endpoints.R b/R/vector_search_endpoints.R index 32e5b9e5..6d83d643 100755 --- a/R/vector_search_endpoints.R +++ b/R/vector_search_endpoints.R @@ -7,72 +7,18 @@ NULL #' #' Create a new endpoint. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Vector Search Endpoints on Databricks reach -#' ONLINE state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Vector Search Endpoints is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param endpoint_type Required. Type of endpoint. #' @param name Required. Name of endpoint. 
#' #' @rdname vectorSearchEndpointsCreateEndpoint #' @export -vectorSearchEndpointsCreateEndpoint <- function(client, name, endpoint_type, timeout = 20, - callback = cli_reporter) { +vectorSearchEndpointsCreateEndpoint <- function(client, name, endpoint_type) { body <- list(endpoint_type = endpoint_type, name = name) - op_response <- client$do("POST", "/api/2.0/vector-search/endpoints", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("ONLINE", c()) - failure_states <- c("OFFLINE", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- vectorSearchEndpointsGetEndpoint(client, endpoint_name = op_response$name) - status <- poll$endpoint_status$state - status_message <- paste("current status:", status) - if (!is.null(poll$endpoint_status)) { - status_message <- poll$endpoint_status$message - } - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach ONLINE, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::vectorSearchEndpointsGetEndpoint(endpoint_name=", - op_response$name, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/vector-search/endpoints", body = body) } - #' Delete an endpoint. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param endpoint_name Required. Name of the endpoint. #' @@ -83,12 +29,8 @@ vectorSearchEndpointsDeleteEndpoint <- function(client, endpoint_name) { client$do("DELETE", paste("/api/2.0/vector-search/endpoints/", endpoint_name, sep = "")) } - #' Get an endpoint. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param endpoint_name Required. Name of the endpoint. #' @@ -98,11 +40,8 @@ vectorSearchEndpointsGetEndpoint <- function(client, endpoint_name) { client$do("GET", paste("/api/2.0/vector-search/endpoints/", endpoint_name, sep = "")) } - #' List all endpoints. #' @param client Required. Instance of DatabricksClient() - - #' #' @param page_token Token for pagination. #' @@ -129,4 +68,69 @@ vectorSearchEndpointsListEndpoints <- function(client, page_token = NULL) { return(results) } +#' Create an endpoint. +#' +#' Create a new endpoint. +#' @param client Required. Instance of DatabricksClient() + +#' +#' @description +#' This is a long-running operation, which blocks until Vector Search Endpoints on Databricks reach +#' ONLINE state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Vector Search Endpoints is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param endpoint_type Required. Type of endpoint. +#' @param name Required. Name of endpoint. 
+#' +#' @rdname vectorSearchEndpointsCreateEndpointAndWait +#' @export +vectorSearchEndpointsCreateEndpointAndWait <- function(client, name, endpoint_type, + timeout = 20, callback = cli_reporter) { + body <- list(endpoint_type = endpoint_type, name = name) + op_response <- client$do("POST", "/api/2.0/vector-search/endpoints", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("ONLINE", c()) + failure_states <- c("OFFLINE", c()) + status_message <- "polling..." + attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- vectorSearchEndpointsGetEndpoint(client, endpoint_name = op_response$name) + status <- poll$endpoint_status$state + status_message <- paste("current status:", status) + if (!is.null(poll$endpoint_status)) { + status_message <- poll$endpoint_status$message + } + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach ONLINE, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::vectorSearchEndpointsGetEndpoint(endpoint_name=", + op_response$name, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + + + diff --git a/R/vector_search_indexes.R b/R/vector_search_indexes.R index 3bc2fc43..b89b9eb0 100755 --- a/R/vector_search_indexes.R +++ b/R/vector_search_indexes.R @@ -7,9 +7,6 @@ NULL #' #' Create a new index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param delta_sync_index_spec Specification for Delta Sync Index. #' @param direct_access_index_spec Specification for Direct Vector Access Index. @@ -26,14 +23,10 @@ vectorSearchIndexesCreateIndex <- function(client, name, endpoint_name, primary_ endpoint_name = endpoint_name, index_type = index_type, name = name, primary_key = primary_key) client$do("POST", "/api/2.0/vector-search/indexes", body = body) } - #' Delete data from index. #' #' Handles the deletion of data from a specified vector index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param index_name Required. Name of the vector index where data is to be deleted. #' @param primary_keys Required. List of primary keys for the data to be deleted. @@ -45,14 +38,10 @@ vectorSearchIndexesDeleteDataVectorIndex <- function(client, index_name, primary client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/delete-data", , sep = ""), body = body) } - #' Delete an index. #' #' Delete an index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param index_name Required. Name of the index. #' @@ -62,14 +51,10 @@ vectorSearchIndexesDeleteIndex <- function(client, index_name) { client$do("DELETE", paste("/api/2.0/vector-search/indexes/", index_name, sep = "")) } - #' Get an index. #' #' Get an index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param index_name Required. Name of the index. 
#' @@ -79,14 +64,10 @@ vectorSearchIndexesGetIndex <- function(client, index_name) { client$do("GET", paste("/api/2.0/vector-search/indexes/", index_name, sep = "")) } - #' List indexes. #' #' List all indexes in the given endpoint. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param endpoint_name Required. Name of the endpoint. #' @param page_token Token for pagination. @@ -114,14 +95,10 @@ vectorSearchIndexesListIndexes <- function(client, endpoint_name, page_token = N return(results) } - #' Query an index. #' #' Query the specified vector index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param columns Required. List of column names to include in the response. #' @param filters_json JSON string representing query filters. @@ -140,14 +117,10 @@ vectorSearchIndexesQueryIndex <- function(client, index_name, columns, filters_j client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/query", , sep = ""), body = body) } - #' Synchronize an index. #' #' Triggers a synchronization process for a specified vector index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param index_name Required. Name of the vector index to synchronize. #' @@ -158,14 +131,10 @@ vectorSearchIndexesSyncIndex <- function(client, index_name) { client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/sync", , sep = "")) } - #' Upsert data into an index. #' #' Handles the upserting of data into a specified vector index. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param index_name Required. Name of the vector index where data is to be upserted. #' @param inputs_json Required. JSON string representing the data to be upserted. @@ -178,3 +147,10 @@ vectorSearchIndexesUpsertDataVectorIndex <- function(client, index_name, inputs_ , sep = ""), body = body) } + + + + + + + diff --git a/R/volumes.R b/R/volumes.R index 7878a332..2624f88b 100755 --- a/R/volumes.R +++ b/R/volumes.R @@ -24,9 +24,6 @@ NULL #' storage location. - The specified storage location is not under the location #' of other tables, nor volumes, or catalogs or schemas. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. The name of the catalog where the schema and the volume are. #' @param comment The comment attached to the volume. @@ -43,7 +40,6 @@ volumesCreate <- function(client, catalog_name, schema_name, name, volume_type, storage_location = storage_location, volume_type = volume_type) client$do("POST", "/api/2.1/unity-catalog/volumes", body = body) } - #' Delete a Volume. #' #' Deletes a volume from the specified parent catalog and schema. @@ -53,9 +49,6 @@ volumesCreate <- function(client, catalog_name, schema_name, name, volume_type, #' privilege on the parent catalog and the **USE_SCHEMA** privilege on the #' parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The three-level (fully qualified) name of the volume. #' @@ -65,7 +58,6 @@ volumesDelete <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/volumes/", name, sep = "")) } - #' List Volumes. #' #' Gets an array of volumes for the current metastore under the parent catalog @@ -80,9 +72,6 @@ volumesDelete <- function(client, name) { #' #' There is no guarantee of a specific ordering of the elements in the array. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param catalog_name Required. 
The identifier of the catalog. #' @param include_browse Whether to include volumes in the response for which the principal can only access selective metadata for. @@ -115,7 +104,6 @@ volumesList <- function(client, catalog_name, schema_name, include_browse = NULL return(results) } - #' Get a Volume. #' #' Gets a volume from the metastore for a specific catalog and schema. @@ -125,9 +113,6 @@ volumesList <- function(client, catalog_name, schema_name, include_browse = NULL #' be the owner or have the **USE_CATALOG** privilege on the parent catalog and #' the **USE_SCHEMA** privilege on the parent schema. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param include_browse Whether to include volumes in the response for which the principal can only access selective metadata for. #' @param name Required. The three-level (fully qualified) name of the volume. @@ -138,7 +123,6 @@ volumesRead <- function(client, name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/volumes/", name, sep = ""), query = query) } - #' Update a Volume. #' #' Updates the specified volume under the specified parent catalog and schema. @@ -151,9 +135,6 @@ volumesRead <- function(client, name, include_browse = NULL) { #' Currently only the name, the owner or the comment of the volume could be #' updated. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param comment The comment attached to the volume. #' @param name Required. The three-level (fully qualified) name of the volume. @@ -168,3 +149,7 @@ volumesUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = body = body) } + + + + diff --git a/R/warehouses.R b/R/warehouses.R index c1fd81d0..8f6fee7e 100755 --- a/R/warehouses.R +++ b/R/warehouses.R @@ -7,16 +7,6 @@ NULL #' #' Creates a new SQL warehouse. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Warehouses on Databricks reach -#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Warehouses is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - #' #' @param auto_stop_mins The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. #' @param channel Channel Details. 
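# Illustrative migration sketch (editorial, assumes the API surface in this
# patch): warehousesCreate() now returns immediately; callers that need to
# block until the warehouse is RUNNING must use warehousesCreateAndWait().
# The name and size values below are placeholders.
client <- DatabricksClient()
wh <- warehousesCreateAndWait(client, name = "adhoc-bi", cluster_size = "2X-Small",
  auto_stop_mins = 20)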
@@ -37,60 +27,18 @@ NULL warehousesCreate <- function(client, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, - name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL, - timeout = 20, callback = cli_reporter) { + name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL) { body <- list(auto_stop_mins = auto_stop_mins, channel = channel, cluster_size = cluster_size, creator_name = creator_name, enable_photon = enable_photon, enable_serverless_compute = enable_serverless_compute, instance_profile_arn = instance_profile_arn, max_num_clusters = max_num_clusters, min_num_clusters = min_num_clusters, name = name, spot_instance_policy = spot_instance_policy, tags = tags, warehouse_type = warehouse_type) - op_response <- client$do("POST", "/api/2.0/sql/warehouses", body = body) - started <- as.numeric(Sys.time()) - target_states <- c("RUNNING", c()) - failure_states <- c("STOPPED", "DELETED", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- warehousesGet(client, id = op_response$id) - status <- poll$state - status_message <- paste("current status:", status) - if (!is.null(poll$health)) { - status_message <- poll$health$summary - } - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::warehousesGet(id=", op_response$id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", "/api/2.0/sql/warehouses", body = body) } - #' Delete a warehouse. #' #' Deletes a SQL warehouse. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Required. #' @@ -100,22 +48,10 @@ warehousesDelete <- function(client, id) { client$do("DELETE", paste("/api/2.0/sql/warehouses/", id, sep = "")) } - #' Update a warehouse. #' #' Updates the configuration for a SQL warehouse. #' @param client Required. Instance of DatabricksClient() - -#' -#' @description -#' This is a long-running operation, which blocks until Warehouses on Databricks reach -#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. -#' By default, the state of Databricks Warehouses is reported to console. You can change this behavior -#' by changing the `callback` parameter. -#' @param timeout Time to wait for the operation to complete in minutes. -#' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param auto_stop_mins The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. #' @param channel Channel Details. 
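# Illustrative migration sketch (editorial): code that previously relied on
# warehousesEdit() blocking until RUNNING should now call the AndWait variant
# explicitly; the id below is a placeholder.
client <- DatabricksClient()
warehousesEdit(client, id = "1234-567890-abcdefgh", auto_stop_mins = 45)         # returns immediately
warehousesEditAndWait(client, id = "1234-567890-abcdefgh", auto_stop_mins = 45)  # blocks until RUNNING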
@@ -137,61 +73,19 @@ warehousesDelete <- function(client, id) { warehousesEdit <- function(client, id, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, - name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL, - timeout = 20, callback = cli_reporter) { + name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL) { body <- list(auto_stop_mins = auto_stop_mins, channel = channel, cluster_size = cluster_size, creator_name = creator_name, enable_photon = enable_photon, enable_serverless_compute = enable_serverless_compute, instance_profile_arn = instance_profile_arn, max_num_clusters = max_num_clusters, min_num_clusters = min_num_clusters, name = name, spot_instance_policy = spot_instance_policy, tags = tags, warehouse_type = warehouse_type) - op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/edit", - , sep = ""), body = body) - started <- as.numeric(Sys.time()) - target_states <- c("RUNNING", c()) - failure_states <- c("STOPPED", "DELETED", c()) - status_message <- "polling..." - attempt <- 1 - while ((started + (timeout * 60)) > as.numeric(Sys.time())) { - poll <- warehousesGet(client, id = id) - status <- poll$state - status_message <- paste("current status:", status) - if (!is.null(poll$health)) { - status_message <- poll$health$summary - } - if (status %in% target_states) { - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = TRUE) - } - return(poll) - } - if (status %in% failure_states) { - msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) - rlang::abort(msg, call = rlang::caller_env()) - } - prefix <- paste0("databricks::warehousesGet(id=", id, ")") - sleep <- attempt - if (sleep > 10) { - # sleep 10s max per attempt - sleep <- 10 - } - if (!is.null(callback)) { - callback(paste0(status, ": ", status_message), done = FALSE) - } - random_pause <- runif(1, min = 0.1, max = 0.5) - Sys.sleep(sleep + random_pause) - attempt <- attempt + 1 - } - msg <- paste("timed out after", timeout, "minutes:", status_message) - rlang::abort(msg, call = rlang::caller_env()) + client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/edit", , sep = ""), + body = body) } - #' Get warehouse info. #' #' Gets the information for a single SQL warehouse. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param id Required. Required. #' @@ -201,14 +95,10 @@ warehousesGet <- function(client, id) { client$do("GET", paste("/api/2.0/sql/warehouses/", id, sep = "")) } - #' Get SQL warehouse permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. #' @@ -219,15 +109,11 @@ warehousesGetPermissionLevels <- function(client, warehouse_id) { client$do("GET", paste("/api/2.0/permissions/warehouses/", warehouse_id, "/permissionLevels", , sep = "")) } - #' Get SQL warehouse permissions. #' #' Gets the permissions of a SQL warehouse. SQL warehouses can inherit #' permissions from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. 
#' @@ -237,7 +123,6 @@ warehousesGetPermissions <- function(client, warehouse_id) { client$do("GET", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = "")) } - #' Get the workspace configuration. #' #' Gets the workspace level configuration that is shared by all SQL warehouses @@ -249,13 +134,10 @@ warehousesGetPermissions <- function(client, warehouse_id) { warehousesGetWorkspaceWarehouseConfig <- function(client) { client$do("GET", "/api/2.0/sql/config/warehouses") } - #' List warehouses. #' #' Lists all SQL warehouses that a user has manager permissions on. #' @param client Required. Instance of DatabricksClient() - - #' #' @param run_as_user_id Service Principal which will be used to fetch the list of warehouses. #' @@ -270,15 +152,11 @@ warehousesList <- function(client, run_as_user_id = NULL) { return(json$warehouses) } - #' Set SQL warehouse permissions. #' #' Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions #' from their root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. @@ -290,14 +168,11 @@ warehousesSetPermissions <- function(client, warehouse_id, access_control_list = client$do("PUT", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = ""), body = body) } - #' Set the workspace configuration. #' #' Sets the workspace level configuration that is shared by all SQL warehouses #' in a workspace. #' @param client Required. Instance of DatabricksClient() - - #' #' @param channel Optional: Channel selection details. #' @param config_param Deprecated: Use sql_configuration_parameters. @@ -321,11 +196,52 @@ warehousesSetWorkspaceWarehouseConfig <- function(client, channel = NULL, config security_policy = security_policy, sql_configuration_parameters = sql_configuration_parameters) client$do("PUT", "/api/2.0/sql/config/warehouses", body = body) } - #' Start a warehouse. #' #' Starts a SQL warehouse. #' @param client Required. Instance of DatabricksClient() +#' +#' @param id Required. Required. +#' +#' @rdname warehousesStart +#' @export +warehousesStart <- function(client, id) { + + client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/start", , sep = "")) +} +#' Stop a warehouse. +#' +#' Stops a SQL warehouse. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param id Required. Required. +#' +#' @rdname warehousesStop +#' @export +warehousesStop <- function(client, id) { + + client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/stop", , sep = "")) +} +#' Update SQL warehouse permissions. +#' +#' Updates the permissions on a SQL warehouse. SQL warehouses can inherit +#' permissions from their root object. +#' @param client Required. Instance of DatabricksClient() +#' +#' @param access_control_list This field has no description yet. +#' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. +#' +#' @rdname warehousesUpdatePermissions +#' @export +warehousesUpdatePermissions <- function(client, warehouse_id, access_control_list = NULL) { + body <- list(access_control_list = access_control_list) + client$do("PATCH", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = ""), + body = body) +} +#' Create a warehouse. +#' +#' Creates a new SQL warehouse. +#' @param client Required. 
Instance of DatabricksClient() #' #' @description @@ -335,17 +251,117 @@ warehousesSetWorkspaceWarehouseConfig <- function(client, channel = NULL, config #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. +#' +#' @param auto_stop_mins The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. +#' @param channel Channel Details. +#' @param cluster_size Size of the clusters allocated for this warehouse. +#' @param creator_name warehouse creator name. +#' @param enable_photon Configures whether the warehouse should use Photon optimized clusters. +#' @param enable_serverless_compute Configures whether the warehouse should use serverless compute. +#' @param instance_profile_arn Deprecated. +#' @param max_num_clusters Maximum number of clusters that the autoscaler will create to handle concurrent queries. +#' @param min_num_clusters Minimum number of available clusters that will be maintained for this SQL warehouse. +#' @param name Logical name for the cluster. +#' @param spot_instance_policy Configurations whether the warehouse should use spot instances. +#' @param tags A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. +#' @param warehouse_type Warehouse type: `PRO` or `CLASSIC`. +#' +#' @rdname warehousesCreateAndWait +#' @export +warehousesCreateAndWait <- function(client, auto_stop_mins = NULL, channel = NULL, + cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, + instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, + name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL, + timeout = 20, callback = cli_reporter) { + body <- list(auto_stop_mins = auto_stop_mins, channel = channel, cluster_size = cluster_size, + creator_name = creator_name, enable_photon = enable_photon, enable_serverless_compute = enable_serverless_compute, + instance_profile_arn = instance_profile_arn, max_num_clusters = max_num_clusters, + min_num_clusters = min_num_clusters, name = name, spot_instance_policy = spot_instance_policy, + tags = tags, warehouse_type = warehouse_type) + op_response <- client$do("POST", "/api/2.0/sql/warehouses", body = body) + started <- as.numeric(Sys.time()) + target_states <- c("RUNNING", c()) + failure_states <- c("STOPPED", "DELETED", c()) + status_message <- "polling..." 
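+  # Editorial note (not in the generated source): same polling pattern as the
+  # other *AndWait helpers; warehousesGet() is polled with linear backoff
+  # capped at 10s plus jitter until the warehouse reaches RUNNING, hits a
+  # STOPPED/DELETED failure state, or the timeout elapses.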
+ attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- warehousesGet(client, id = op_response$id) + status <- poll$state + status_message <- paste("current status:", status) + if (!is.null(poll$health)) { + status_message <- poll$health$summary + } + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + if (status %in% failure_states) { + msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } + prefix <- paste0("databricks::warehousesGet(id=", op_response$id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) +} + + +#' Update a warehouse. +#' +#' Updates the configuration for a SQL warehouse. +#' @param client Required. Instance of DatabricksClient() #' +#' @description +#' This is a long-running operation, which blocks until Warehouses on Databricks reach +#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Warehouses is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. #' +#' @param auto_stop_mins The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. +#' @param channel Channel Details. +#' @param cluster_size Size of the clusters allocated for this warehouse. +#' @param creator_name warehouse creator name. +#' @param enable_photon Configures whether the warehouse should use Photon optimized clusters. +#' @param enable_serverless_compute Configures whether the warehouse should use serverless compute. #' @param id Required. Required. +#' @param instance_profile_arn Deprecated. +#' @param max_num_clusters Maximum number of clusters that the autoscaler will create to handle concurrent queries. +#' @param min_num_clusters Minimum number of available clusters that will be maintained for this SQL warehouse. +#' @param name Logical name for the cluster. +#' @param spot_instance_policy Configurations whether the warehouse should use spot instances. +#' @param tags A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. +#' @param warehouse_type Warehouse type: `PRO` or `CLASSIC`. 
#' -#' @rdname warehousesStart +#' @rdname warehousesEditAndWait #' @export -warehousesStart <- function(client, id, timeout = 20, callback = cli_reporter) { - - op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/start", - , sep = "")) +warehousesEditAndWait <- function(client, id, auto_stop_mins = NULL, channel = NULL, + cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, + instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, + name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL, + timeout = 20, callback = cli_reporter) { + body <- list(auto_stop_mins = auto_stop_mins, channel = channel, cluster_size = cluster_size, + creator_name = creator_name, enable_photon = enable_photon, enable_serverless_compute = enable_serverless_compute, + instance_profile_arn = instance_profile_arn, max_num_clusters = max_num_clusters, + min_num_clusters = min_num_clusters, name = name, spot_instance_policy = spot_instance_policy, + tags = tags, warehouse_type = warehouse_type) + op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/edit", + , sep = ""), body = body) started <- as.numeric(Sys.time()) target_states <- c("RUNNING", c()) failure_states <- c("STOPPED", "DELETED", c()) @@ -385,32 +401,38 @@ warehousesStart <- function(client, id, timeout = 20, callback = cli_reporter) { rlang::abort(msg, call = rlang::caller_env()) } -#' Stop a warehouse. + + + + + + + +#' Start a warehouse. #' -#' Stops a SQL warehouse. +#' Starts a SQL warehouse. #' @param client Required. Instance of DatabricksClient() #' #' @description #' This is a long-running operation, which blocks until Warehouses on Databricks reach -#' STOPPED state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' RUNNING state with the timeout of 20 minutes, that you can change via `timeout` parameter. #' By default, the state of Databricks Warehouses is reported to console. You can change this behavior #' by changing the `callback` parameter. #' @param timeout Time to wait for the operation to complete in minutes. #' @param callback Function to report the status of the operation. By default, it reports to console. - -#' #' #' @param id Required. Required. #' -#' @rdname warehousesStop +#' @rdname warehousesStartAndWait #' @export -warehousesStop <- function(client, id, timeout = 20, callback = cli_reporter) { +warehousesStartAndWait <- function(client, id, timeout = 20, callback = cli_reporter) { - op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/stop", + op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/start", , sep = "")) started <- as.numeric(Sys.time()) - target_states <- c("STOPPED", c()) + target_states <- c("RUNNING", c()) + failure_states <- c("STOPPED", "DELETED", c()) status_message <- "polling..." attempt <- 1 while ((started + (timeout * 60)) > as.numeric(Sys.time())) { @@ -426,6 +448,10 @@ warehousesStop <- function(client, id, timeout = 20, callback = cli_reporter) { } return(poll) } + if (status %in% failure_states) { + msg <- paste("failed to reach RUNNING, got ", status, "-", status_message) + rlang::abort(msg, call = rlang::caller_env()) + } prefix <- paste0("databricks::warehousesGet(id=", id, ")") sleep <- attempt if (sleep > 10) { @@ -443,23 +469,60 @@ warehousesStop <- function(client, id, timeout = 20, callback = cli_reporter) { rlang::abort(msg, call = rlang::caller_env()) } -#' Update SQL warehouse permissions. 
+#' Stop a warehouse. #' -#' Updates the permissions on a SQL warehouse. SQL warehouses can inherit -#' permissions from their root object. +#' Stops a SQL warehouse. #' @param client Required. Instance of DatabricksClient() - #' +#' @description +#' This is a long-running operation, which blocks until Warehouses on Databricks reach +#' STOPPED state with the timeout of 20 minutes, that you can change via `timeout` parameter. +#' By default, the state of Databricks Warehouses is reported to console. You can change this behavior +#' by changing the `callback` parameter. +#' @param timeout Time to wait for the operation to complete in minutes. +#' @param callback Function to report the status of the operation. By default, it reports to console. #' -#' @param access_control_list This field has no description yet. -#' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. +#' @param id Required. Required. #' -#' @rdname warehousesUpdatePermissions +#' @rdname warehousesStopAndWait #' @export -warehousesUpdatePermissions <- function(client, warehouse_id, access_control_list = NULL) { - body <- list(access_control_list = access_control_list) - client$do("PATCH", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = ""), - body = body) +warehousesStopAndWait <- function(client, id, timeout = 20, callback = cli_reporter) { + + op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/stop", + , sep = "")) + started <- as.numeric(Sys.time()) + target_states <- c("STOPPED", c()) + status_message <- "polling..." + attempt <- 1 + while ((started + (timeout * 60)) > as.numeric(Sys.time())) { + poll <- warehousesGet(client, id = id) + status <- poll$state + status_message <- paste("current status:", status) + if (!is.null(poll$health)) { + status_message <- poll$health$summary + } + if (status %in% target_states) { + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = TRUE) + } + return(poll) + } + prefix <- paste0("databricks::warehousesGet(id=", id, ")") + sleep <- attempt + if (sleep > 10) { + # sleep 10s max per attempt + sleep <- 10 + } + if (!is.null(callback)) { + callback(paste0(status, ": ", status_message), done = FALSE) + } + random_pause <- runif(1, min = 0.1, max = 0.5) + Sys.sleep(sleep + random_pause) + attempt <- attempt + 1 + } + msg <- paste("timed out after", timeout, "minutes:", status_message) + rlang::abort(msg, call = rlang::caller_env()) } + diff --git a/R/workspace.R b/R/workspace.R index a7f7e96e..3c269ccf 100755 --- a/R/workspace.R +++ b/R/workspace.R @@ -14,9 +14,6 @@ NULL #' Object deletion cannot be undone and deleting a directory recursively is not #' atomic. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Required. The absolute path of the notebook or directory. #' @param recursive The flag that specifies whether to delete the object recursively. @@ -27,7 +24,6 @@ workspaceDelete <- function(client, path, recursive = NULL) { body <- list(path = path, recursive = recursive) client$do("POST", "/api/2.0/workspace/delete", body = body) } - #' Export a workspace object. #' #' Exports an object or the contents of an entire directory. @@ -39,9 +35,6 @@ workspaceDelete <- function(client, path, recursive = NULL) { #' `MAX_NOTEBOOK_SIZE_EXCEEDED`. Currently, this API does not support exporting #' a library. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param format This specifies the format of the exported file. 
#' @param path Required. The absolute path of the object or directory. @@ -52,14 +45,10 @@ workspaceExport <- function(client, path, format = NULL) { query <- list(format = format, path = path) client$do("GET", "/api/2.0/workspace/export", query = query) } - #' Get workspace object permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. #' @param workspace_object_type Required. The workspace object type for which to get or manage permissions. @@ -71,15 +60,11 @@ workspaceGetPermissionLevels <- function(client, workspace_object_type, workspac client$do("GET", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, "/permissionLevels", , sep = "")) } - #' Get workspace object permissions. #' #' Gets the permissions of a workspace object. Workspace objects can inherit #' permissions from their parent objects or root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. #' @param workspace_object_type Required. The workspace object type for which to get or manage permissions. @@ -91,15 +76,11 @@ workspaceGetPermissions <- function(client, workspace_object_type, workspace_obj client$do("GET", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, sep = "")) } - #' Get status. #' #' Gets the status of an object or a directory. If `path` does not exist, this #' call returns an error `RESOURCE_DOES_NOT_EXIST`. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param path Required. The absolute path of the notebook or directory. #' @@ -109,7 +90,6 @@ workspaceGetStatus <- function(client, path) { query <- list(path = path) client$do("GET", "/api/2.0/workspace/get-status", query = query) } - #' Import a workspace object. #' #' Imports a workspace object (for example, a notebook or file) or the contents @@ -119,9 +99,6 @@ workspaceGetStatus <- function(client, path) { #' the `language` field unset. To import a single file as `SOURCE`, you must set #' the `language` field. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param content The base64-encoded content. #' @param format This specifies the format of the file to be imported. @@ -137,16 +114,12 @@ workspaceImport <- function(client, path, content = NULL, format = NULL, languag path = path) client$do("POST", "/api/2.0/workspace/import", body = body) } - #' List contents. #' #' Lists the contents of a directory, or the object if it is not a directory. If #' the input path does not exist, this call returns an error #' `RESOURCE_DOES_NOT_EXIST`. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param notebooks_modified_after UTC timestamp in milliseconds. #' @param path Required. The absolute path of the notebook or directory. @@ -162,7 +135,6 @@ workspaceList <- function(client, path, notebooks_modified_after = NULL) { return(json$objects) } - #' Create a directory. #' #' Creates the specified directory (and necessary parent directories if they do @@ -172,9 +144,6 @@ workspaceList <- function(client, path, notebooks_modified_after = NULL) { #' Note that if this operation fails it may have succeeded in creating some of #' the necessary parent directories. #' @param client Required. 
Instance of DatabricksClient() - - -#' #' #' @param path Required. The absolute path of the directory. #' @@ -184,15 +153,11 @@ workspaceMkdirs <- function(client, path) { body <- list(path = path) client$do("POST", "/api/2.0/workspace/mkdirs", body = body) } - #' Set workspace object permissions. #' #' Sets permissions on a workspace object. Workspace objects can inherit #' permissions from their parent objects or root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. @@ -206,15 +171,11 @@ workspaceSetPermissions <- function(client, workspace_object_type, workspace_obj client$do("PUT", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, sep = ""), body = body) } - #' Update workspace object permissions. #' #' Updates the permissions on a workspace object. Workspace objects can inherit #' permissions from their parent objects or root object. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param access_control_list This field has no description yet. #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. @@ -229,3 +190,12 @@ workspaceUpdatePermissions <- function(client, workspace_object_type, workspace_ workspace_object_id, sep = ""), body = body) } + + + + + + + + + diff --git a/R/workspace_bindings.R b/R/workspace_bindings.R index 6638ccbb..b046b8f9 100755 --- a/R/workspace_bindings.R +++ b/R/workspace_bindings.R @@ -8,9 +8,6 @@ NULL #' Gets workspace bindings of the catalog. The caller must be a metastore admin #' or an owner of the catalog. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param name Required. The name of the catalog. #' @@ -21,15 +18,11 @@ workspaceBindingsGet <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/workspace-bindings/catalogs/", name, sep = "")) } - #' Get securable workspace bindings. #' #' Gets workspace bindings of the securable. The caller must be a metastore #' admin or an owner of the securable. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param securable_name Required. The name of the securable. #' @param securable_type Required. The type of the securable. @@ -41,15 +34,11 @@ workspaceBindingsGetBindings <- function(client, securable_type, securable_name) client$do("GET", paste("/api/2.1/unity-catalog/bindings/", securable_type, "/", securable_name, sep = "")) } - #' Update catalog workspace bindings. #' #' Updates workspace bindings of the catalog. The caller must be a metastore #' admin or an owner of the catalog. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param assign_workspaces A list of workspace IDs. #' @param name Required. The name of the catalog. @@ -62,15 +51,11 @@ workspaceBindingsUpdate <- function(client, name, assign_workspaces = NULL, unas client$do("PATCH", paste("/api/2.1/unity-catalog/workspace-bindings/catalogs/", name, sep = ""), body = body) } - #' Update securable workspace bindings. #' #' Updates workspace bindings of the securable. The caller must be a metastore #' admin or an owner of the securable. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param add List of workspace bindings. #' @param remove List of workspace bindings. 
@@ -86,3 +71,6 @@ workspaceBindingsUpdateBindings <- function(client, securable_type, securable_na "/", securable_name, sep = ""), body = body) } + + + diff --git a/R/workspace_conf.R b/R/workspace_conf.R index 8420211a..b16e4e7f 100755 --- a/R/workspace_conf.R +++ b/R/workspace_conf.R @@ -7,9 +7,6 @@ NULL #' #' Gets the configuration status for a workspace. #' @param client Required. Instance of DatabricksClient() - - -#' #' #' @param keys Required. This field has no description yet. #' @@ -19,14 +16,11 @@ workspaceConfGetStatus <- function(client, keys) { query <- list(keys = keys) client$do("GET", "/api/2.0/workspace-conf", query = query) } - #' Enable/disable features. #' #' Sets the configuration status for a workspace, including enabling or #' disabling it. #' @param client Required. Instance of DatabricksClient() - - #' #' #' @rdname workspaceConfSetStatus @@ -36,3 +30,4 @@ workspaceConfSetStatus <- function(client) { client$do("PATCH", "/api/2.0/workspace-conf") } + diff --git a/README.md b/README.md index 6ea20a15..681e205d 100644 --- a/README.md +++ b/README.md @@ -22,8 +22,8 @@ library(dplyr) library(databricks) client <- DatabricksClient() running <- clustersList(client) %>% filter(state == 'RUNNING') -context <- commandExecutionCreate(client, cluster_id=running$cluster_id, language='python') -res <- commandExecutionExecute(client, cluster_id=running$cluster_id, context_id=context$id, language='sql', command='show tables') +context <- commandExecutionCreateAndWait(client, cluster_id=running$cluster_id, language='python') +res <- commandExecutionExecuteAndWait(client, cluster_id=running$cluster_id, context_id=context$id, language='sql', command='show tables') res ``` @@ -46,7 +46,7 @@ All `list` methods (and those, which return any list of results), do consistentl All long-running operations do poll Databricks backend until the entity reaches desired state: ```r -> clustersCreate(client, spark_version = "12.x-snapshot-scala2.12", cluster_name = "r-sdk-cluster", num_workers = 1, autotermination_minutes=20, node_type_id="i3.xlarge") +> clustersCreateAndWait(client, spark_version = "12.x-snapshot-scala2.12", cluster_name = "r-sdk-cluster", num_workers = 1, autotermination_minutes=20, node_type_id="i3.xlarge") PENDING: Finding instances for new nodes, acquiring more instances if necessary ``` diff --git a/man/clustersCreate.Rd b/man/clustersCreate.Rd index d9859c9b..1d4a4297 100644 --- a/man/clustersCreate.Rd +++ b/man/clustersCreate.Rd @@ -33,9 +33,7 @@ clustersCreate( spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, - workload_type = NULL, - timeout = 20, - callback = cli_reporter + workload_type = NULL ) } \arguments{ @@ -96,23 +94,14 @@ clustersCreate( \item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} \item{workload_type}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Creates a new Spark cluster. This method will acquire new instances from the cloud provider if necessary. 
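The same pattern as in the README applies to clusters: the blocking `AndWait` variant returns the entity once it reaches its target state, while the plain method returns immediately. A sketch, assuming a configured client:

```r
library(databricks)

client <- DatabricksClient()

# Blocks until the cluster reaches RUNNING (or fails / times out).
running <- clustersCreateAndWait(client,
  spark_version = "12.x-snapshot-scala2.12",
  cluster_name = "r-sdk-cluster",
  num_workers = 1,
  autotermination_minutes = 20,
  node_type_id = "i3.xlarge")

# Returns immediately; the cluster is terminated asynchronously.
clustersDelete(client, cluster_id = running$cluster_id)
```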
Note: Databricks may not be able to acquire some of the requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient network issues. - +} +\details{ If Databricks acquires at least 85\% of the requested on-demand nodes, cluster creation will succeed. Otherwise the cluster will terminate with an informative error message. diff --git a/man/clustersCreateAndWait.Rd b/man/clustersCreateAndWait.Rd new file mode 100644 index 00000000..5f18438d --- /dev/null +++ b/man/clustersCreateAndWait.Rd @@ -0,0 +1,119 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{clustersCreateAndWait} +\alias{clustersCreateAndWait} +\title{Create new cluster.} +\usage{ +clustersCreateAndWait( + client, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{spark_version}{Required. The Spark version of the cluster, e.g.} + +\item{apply_policy_default_values}{This field has no description yet.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} + +\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} + +\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} + +\item{cluster_log_conf}{The configuration for delivering spark logs to a long-term storage destination.} + +\item{cluster_name}{Cluster name requested by the user.} + +\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} + +\item{custom_tags}{Additional tags for cluster resources.} + +\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} + +\item{docker_image}{This field has no description yet.} + +\item{driver_instance_pool_id}{The optional ID of the instance pool for the driver of the cluster belongs.} + +\item{driver_node_type_id}{The node type of the Spark driver.} + +\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} + +\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} + +\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} + +\item{init_scripts}{The configuration for storing init scripts.} + +\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} + +\item{node_type_id}{This field encodes, through a single value, the resources available to 
each of the Spark nodes in this cluster.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} + +\item{runtime_engine}{Decides which runtime engine to be use, e.g.} + +\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} + +\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} + +\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} + +\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} + +\item{workload_type}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Creates a new Spark cluster. This method will acquire new instances from the +cloud provider if necessary. Note: Databricks may not be able to acquire some +of the requested nodes, due to cloud provider limitations (account limits, +spot price, etc.) or transient network issues. + +If Databricks acquires at least 85\% of the requested on-demand nodes, cluster +creation will succeed. Otherwise the cluster will terminate with an +informative error message. +} diff --git a/man/clustersDelete.Rd b/man/clustersDelete.Rd index 8caf005c..06b90ac1 100644 --- a/man/clustersDelete.Rd +++ b/man/clustersDelete.Rd @@ -4,24 +4,14 @@ \alias{clustersDelete} \title{Terminate cluster.} \usage{ -clustersDelete(client, cluster_id, timeout = 20, callback = cli_reporter) +clustersDelete(client, cluster_id) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} \item{cluster_id}{Required. The cluster to be terminated.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -TERMINATED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously. Once the termination has completed, the cluster will be in a \code{TERMINATED} state. If the cluster is already in a \code{TERMINATING} or diff --git a/man/clustersDeleteAndWait.Rd b/man/clustersDeleteAndWait.Rd new file mode 100644 index 00000000..2156eed5 --- /dev/null +++ b/man/clustersDeleteAndWait.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{clustersDeleteAndWait} +\alias{clustersDeleteAndWait} +\title{Terminate cluster.} +\usage{ +clustersDeleteAndWait( + client, + cluster_id, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be terminated.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +TERMINATED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Terminates the Spark cluster with the specified ID. The cluster is removed +asynchronously. Once the termination has completed, the cluster will be in a +\code{TERMINATED} state. If the cluster is already in a \code{TERMINATING} or +\code{TERMINATED} state, nothing will happen. +} diff --git a/man/clustersEdit.Rd b/man/clustersEdit.Rd index aacbf435..1b212a9d 100644 --- a/man/clustersEdit.Rd +++ b/man/clustersEdit.Rd @@ -34,9 +34,7 @@ clustersEdit( spark_conf = NULL, spark_env_vars = NULL, ssh_public_keys = NULL, - workload_type = NULL, - timeout = 20, - callback = cli_reporter + workload_type = NULL ) } \arguments{ @@ -99,21 +97,12 @@ clustersEdit( \item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} \item{workload_type}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Updates the configuration of a cluster to match the provided attributes and size. A cluster can be updated if it is in a \code{RUNNING} or \code{TERMINATED} state. - +} +\details{ If a cluster is updated while in a \code{RUNNING} state, it will be restarted so that the new attributes can take effect. diff --git a/man/clustersEditAndWait.Rd b/man/clustersEditAndWait.Rd new file mode 100644 index 00000000..b25a0ef3 --- /dev/null +++ b/man/clustersEditAndWait.Rd @@ -0,0 +1,126 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{clustersEditAndWait} +\alias{clustersEditAndWait} +\title{Update cluster configuration.} +\usage{ +clustersEditAndWait( + client, + cluster_id, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()}
+
+\item{cluster_id}{Required. ID of the cluster.}
+
+\item{spark_version}{Required. The Spark version of the cluster, e.g.}
+
+\item{apply_policy_default_values}{This field has no description yet.}
+
+\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.}
+
+\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.}
+
+\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.}
+
+\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.}
+
+\item{cluster_log_conf}{The configuration for delivering spark logs to a long-term storage destination.}
+
+\item{cluster_name}{Cluster name requested by the user.}
+
+\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.}
+
+\item{custom_tags}{Additional tags for cluster resources.}
+
+\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.}
+
+\item{docker_image}{This field has no description yet.}
+
+\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.}
+
+\item{driver_node_type_id}{The node type of the Spark driver.}
+
+\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.}
+
+\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.}
+
+\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.}
+
+\item{init_scripts}{The configuration for storing init scripts.}
+
+\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.}
+
+\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.}
+
+\item{num_workers}{Number of worker nodes that this cluster should have.}
+
+\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.}
+
+\item{runtime_engine}{Decides which runtime engine to use, e.g.}
+
+\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.}
+
+\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.}
+
+\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.}
+
+\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.}
+
+\item{workload_type}{This field has no description yet.}
+
+\item{timeout}{Time to wait for the operation to complete in minutes.}
+
+\item{callback}{Function to report the status of the operation. By default, it reports to console.}
+}
+\description{
+This is a long-running operation, which blocks until Clusters on Databricks reach
+RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter.
+By default, the state of Databricks Clusters is reported to console. You can change this behavior
+by changing the \code{callback} parameter.
+}
+\details{
+Updates the configuration of a cluster to match the provided attributes and
+size. A cluster can be updated if it is in a \code{RUNNING} or \code{TERMINATED} state.
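A blocking edit can be sketched as follows, with a placeholder cluster id and assuming the client setup shown earlier:

```r
# Blocks until the edited cluster is back in a RUNNING state.
edited <- clustersEditAndWait(client,
  cluster_id = "0123-456789-abcdefgh",  # placeholder cluster id
  spark_version = "12.x-snapshot-scala2.12",
  num_workers = 2)
```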
+ +If a cluster is updated while in a \code{RUNNING} state, it will be restarted so +that the new attributes can take effect. + +If a cluster is updated while in a \code{TERMINATED} state, it will remain +\code{TERMINATED}. The next time it is started using the \code{clusters/start} API, the +new attributes will take effect. Any attempt to update a cluster in any other +state will be rejected with an \code{INVALID_STATE} error code. + +Clusters created by the Databricks Jobs service cannot be edited. +} diff --git a/man/clustersResize.Rd b/man/clustersResize.Rd index 9bedada6..156cb57c 100644 --- a/man/clustersResize.Rd +++ b/man/clustersResize.Rd @@ -4,14 +4,7 @@ \alias{clustersResize} \title{Resize cluster.} \usage{ -clustersResize( - client, - cluster_id, - autoscale = NULL, - num_workers = NULL, - timeout = 20, - callback = cli_reporter -) +clustersResize(client, cluster_id, autoscale = NULL, num_workers = NULL) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} @@ -21,18 +14,8 @@ clustersResize( \item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} \item{num_workers}{Number of worker nodes that this cluster should have.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a \code{RUNNING} state. } diff --git a/man/clustersResizeAndWait.Rd b/man/clustersResizeAndWait.Rd new file mode 100644 index 00000000..5d4781eb --- /dev/null +++ b/man/clustersResizeAndWait.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{clustersResizeAndWait} +\alias{clustersResizeAndWait} +\title{Resize cluster.} +\usage{ +clustersResizeAndWait( + client, + cluster_id, + autoscale = NULL, + num_workers = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be resized.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Resizes a cluster to have a desired number of workers. This will fail unless +the cluster is in a \code{RUNNING} state. 
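Since a resize only succeeds from a `RUNNING` state, the blocking variant is the natural fit; a sketch with a placeholder id:

```r
# Fixed-size resize, blocking until the cluster is RUNNING again.
resized <- clustersResizeAndWait(client,
  cluster_id = "0123-456789-abcdefgh",  # placeholder cluster id
  num_workers = 4)

# Autoscaling alternative (field names assumed from the REST API):
# clustersResizeAndWait(client, cluster_id = "0123-456789-abcdefgh",
#   autoscale = list(min_workers = 2, max_workers = 8))
```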
+} diff --git a/man/clustersRestart.Rd b/man/clustersRestart.Rd index 40d44c66..83ed3707 100644 --- a/man/clustersRestart.Rd +++ b/man/clustersRestart.Rd @@ -4,13 +4,7 @@ \alias{clustersRestart} \title{Restart cluster.} \usage{ -clustersRestart( - client, - cluster_id, - restart_user = NULL, - timeout = 20, - callback = cli_reporter -) +clustersRestart(client, cluster_id, restart_user = NULL) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} @@ -18,18 +12,8 @@ clustersRestart( \item{cluster_id}{Required. The cluster to be started.} \item{restart_user}{\if{html}{\out{}}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a \code{RUNNING} state, nothing will happen. } diff --git a/man/clustersRestartAndWait.Rd b/man/clustersRestartAndWait.Rd new file mode 100644 index 00000000..b2962000 --- /dev/null +++ b/man/clustersRestartAndWait.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{clustersRestartAndWait} +\alias{clustersRestartAndWait} +\title{Restart cluster.} +\usage{ +clustersRestartAndWait( + client, + cluster_id, + restart_user = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be started.} + +\item{restart_user}{\if{html}{\out{}}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Restarts a Spark cluster with the supplied ID. If the cluster is not +currently in a \code{RUNNING} state, nothing will happen. +} diff --git a/man/clustersStart.Rd b/man/clustersStart.Rd index 908a27e0..e6617bb0 100644 --- a/man/clustersStart.Rd +++ b/man/clustersStart.Rd @@ -4,26 +4,18 @@ \alias{clustersStart} \title{Start terminated cluster.} \usage{ -clustersStart(client, cluster_id, timeout = 20, callback = cli_reporter) +clustersStart(client, cluster_id) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} \item{cluster_id}{Required. The cluster to be started.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. 
You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Starts a terminated Spark cluster with the supplied ID. This works similar to \code{createCluster} except: +} +\details{ \itemize{ \item The previous cluster id and attributes are preserved. * The cluster starts with the last specified cluster size. * If the previous cluster was an diff --git a/man/clustersStartAndWait.Rd b/man/clustersStartAndWait.Rd new file mode 100644 index 00000000..3d0abf48 --- /dev/null +++ b/man/clustersStartAndWait.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{clustersStartAndWait} +\alias{clustersStartAndWait} +\title{Start terminated cluster.} +\usage{ +clustersStartAndWait(client, cluster_id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be started.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Starts a terminated Spark cluster with the supplied ID. This works similar to +\code{createCluster} except: +\itemize{ +\item The previous cluster id and attributes are preserved. * The cluster starts +with the last specified cluster size. * If the previous cluster was an +autoscaling cluster, the current cluster starts with the minimum number of +nodes. * If the cluster is not currently in a \code{TERMINATED} state, nothing +will happen. * Clusters launched to run a job cannot be started. +} +} diff --git a/man/commandExecutionCancel.Rd b/man/commandExecutionCancel.Rd index 38316eb9..7d4c9391 100644 --- a/man/commandExecutionCancel.Rd +++ b/man/commandExecutionCancel.Rd @@ -8,9 +8,7 @@ commandExecutionCancel( client, cluster_id = NULL, command_id = NULL, - context_id = NULL, - timeout = 20, - callback = cli_reporter + context_id = NULL ) } \arguments{ @@ -21,19 +19,10 @@ commandExecutionCancel( \item{command_id}{This field has no description yet.} \item{context_id}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Command Execution on Databricks reach -Cancelled state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Command Execution is reported to console. You can change this behavior -by changing the \code{callback} parameter. +Cancels a currently running command within an execution context. } \details{ -Cancels a currently running command within an execution context. - The command ID is obtained from a prior successful call to \strong{execute}. 
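Restarting a terminated cluster needs only its id, since the previous attributes and size are preserved; a minimal blocking sketch with a placeholder id:

```r
# Blocks until the terminated cluster is RUNNING again.
started <- clustersStartAndWait(client, cluster_id = "0123-456789-abcdefgh")
```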
} diff --git a/man/commandExecutionCancelAndWait.Rd b/man/commandExecutionCancelAndWait.Rd new file mode 100644 index 00000000..f5ea2904 --- /dev/null +++ b/man/commandExecutionCancelAndWait.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{commandExecutionCancelAndWait} +\alias{commandExecutionCancelAndWait} +\title{Cancel a command.} +\usage{ +commandExecutionCancelAndWait( + client, + cluster_id = NULL, + command_id = NULL, + context_id = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{This field has no description yet.} + +\item{command_id}{This field has no description yet.} + +\item{context_id}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Command Execution on Databricks reach +Cancelled state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Command Execution is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Cancels a currently running command within an execution context. + +The command ID is obtained from a prior successful call to \strong{execute}. +} diff --git a/man/commandExecutionCreate.Rd b/man/commandExecutionCreate.Rd index fae681e0..9fd7e516 100644 --- a/man/commandExecutionCreate.Rd +++ b/man/commandExecutionCreate.Rd @@ -4,13 +4,7 @@ \alias{commandExecutionCreate} \title{Create an execution context.} \usage{ -commandExecutionCreate( - client, - cluster_id = NULL, - language = NULL, - timeout = 20, - callback = cli_reporter -) +commandExecutionCreate(client, cluster_id = NULL, language = NULL) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} @@ -18,19 +12,10 @@ commandExecutionCreate( \item{cluster_id}{Running cluster id.} \item{language}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Command Execution on Databricks reach -Running state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Command Execution is reported to console. You can change this behavior -by changing the \code{callback} parameter. +Creates an execution context for running cluster commands. } \details{ -Creates an execution context for running cluster commands. - If successful, this method returns the ID of the new execution context. } diff --git a/man/commandExecutionCreateAndWait.Rd b/man/commandExecutionCreateAndWait.Rd new file mode 100644 index 00000000..e09f15fb --- /dev/null +++ b/man/commandExecutionCreateAndWait.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{commandExecutionCreateAndWait} +\alias{commandExecutionCreateAndWait} +\title{Create an execution context.} +\usage{ +commandExecutionCreateAndWait( + client, + cluster_id = NULL, + language = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{cluster_id}{Running cluster id.} + +\item{language}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Command Execution on Databricks reach +Running state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Command Execution is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Creates an execution context for running cluster commands. + +If successful, this method returns the ID of the new execution context. +} diff --git a/man/commandExecutionExecute.Rd b/man/commandExecutionExecute.Rd index 2d3a9e1f..e994438b 100644 --- a/man/commandExecutionExecute.Rd +++ b/man/commandExecutionExecute.Rd @@ -9,9 +9,7 @@ commandExecutionExecute( cluster_id = NULL, command = NULL, context_id = NULL, - language = NULL, - timeout = 20, - callback = cli_reporter + language = NULL ) } \arguments{ @@ -24,21 +22,12 @@ commandExecutionExecute( \item{context_id}{Running context id.} \item{language}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Command Execution on Databricks reach -Finished or Error state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Command Execution is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Runs a cluster command in the given execution context, using the provided language. - +} +\details{ If successful, it returns an ID for tracking the status of the command's execution. } diff --git a/man/commandExecutionExecuteAndWait.Rd b/man/commandExecutionExecuteAndWait.Rd new file mode 100644 index 00000000..0ce9e64a --- /dev/null +++ b/man/commandExecutionExecuteAndWait.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{commandExecutionExecuteAndWait} +\alias{commandExecutionExecuteAndWait} +\title{Run a command.} +\usage{ +commandExecutionExecuteAndWait( + client, + cluster_id = NULL, + command = NULL, + context_id = NULL, + language = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Running cluster id.} + +\item{command}{Executable code.} + +\item{context_id}{Running context id.} + +\item{language}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Command Execution on Databricks reach +Finished or Error state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Command Execution is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Runs a cluster command in the given execution context, using the provided +language. 
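Chained together, the two blocking calls give the remote-command workflow from the README; `running$cluster_id` is assumed to identify a RUNNING cluster:

```r
# Create an execution context, blocking until it is Running.
context <- commandExecutionCreateAndWait(client,
  cluster_id = running$cluster_id, language = "python")

# Run a command in that context, blocking until Finished or Error.
res <- commandExecutionExecuteAndWait(client,
  cluster_id = running$cluster_id, context_id = context$id,
  language = "sql", command = "show tables")
```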
+ +If successful, it returns an ID for tracking the status of the command's +execution. +} diff --git a/man/jobsCancelRun.Rd b/man/jobsCancelRun.Rd index c5f96a55..08ceb5eb 100644 --- a/man/jobsCancelRun.Rd +++ b/man/jobsCancelRun.Rd @@ -4,24 +4,14 @@ \alias{jobsCancelRun} \title{Cancel a run.} \usage{ -jobsCancelRun(client, run_id, timeout = 20, callback = cli_reporter) +jobsCancelRun(client, run_id) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} \item{run_id}{Required. This field is required.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Cancels a job run or a task run. The run is canceled asynchronously, so it may still be running when this request completes. } diff --git a/man/jobsCancelRunAndWait.Rd b/man/jobsCancelRunAndWait.Rd new file mode 100644 index 00000000..8b926eeb --- /dev/null +++ b/man/jobsCancelRunAndWait.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{jobsCancelRunAndWait} +\alias{jobsCancelRunAndWait} +\title{Cancel a run.} +\usage{ +jobsCancelRunAndWait(client, run_id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. This field is required.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Cancels a job run or a task run. The run is canceled asynchronously, so it +may still be running when this request completes. +} diff --git a/man/jobsGetRun.Rd b/man/jobsGetRun.Rd index 9722416b..02c13718 100644 --- a/man/jobsGetRun.Rd +++ b/man/jobsGetRun.Rd @@ -8,9 +8,7 @@ jobsGetRun( client, run_id, include_history = NULL, - include_resolved_values = NULL, - timeout = 20, - callback = cli_reporter + include_resolved_values = NULL ) } \arguments{ @@ -21,17 +19,7 @@ jobsGetRun( \item{include_history}{Whether to include the repair history in the response.} \item{include_resolved_values}{Whether to include resolved parameter values in the response.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Retrieve the metadata of a run. 
} diff --git a/man/jobsGetRunAndWait.Rd b/man/jobsGetRunAndWait.Rd new file mode 100644 index 00000000..8513d563 --- /dev/null +++ b/man/jobsGetRunAndWait.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{jobsGetRunAndWait} +\alias{jobsGetRunAndWait} +\title{Get a single job run.} +\usage{ +jobsGetRunAndWait( + client, + run_id, + include_history = NULL, + include_resolved_values = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} + +\item{include_history}{Whether to include the repair history in the response.} + +\item{include_resolved_values}{Whether to include resolved parameter values in the response.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Retrieve the metadata of a run. +} diff --git a/man/jobsRepairRun.Rd b/man/jobsRepairRun.Rd index e8f7f491..fbcb164c 100644 --- a/man/jobsRepairRun.Rd +++ b/man/jobsRepairRun.Rd @@ -19,9 +19,7 @@ jobsRepairRun( rerun_dependent_tasks = NULL, rerun_tasks = NULL, spark_submit_params = NULL, - sql_params = NULL, - timeout = 20, - callback = cli_reporter + sql_params = NULL ) } \arguments{ @@ -54,18 +52,8 @@ jobsRepairRun( \item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} \item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job and task settings, and can be viewed in the history for the original job run. 
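The run-level helpers compose in the same way: block until a run terminates, then repair it if tasks failed. A sketch with a placeholder run id (the `run_id` field on the returned run object is assumed from the Jobs API):

```r
# Blocks until the run reaches TERMINATED or SKIPPED; 123456 is a placeholder.
run <- jobsGetRunAndWait(client, run_id = 123456)

# Re-run only the failed tasks of that run, again blocking until terminal.
repaired <- jobsRepairRunAndWait(client,
  run_id = run$run_id,
  rerun_all_failed_tasks = TRUE)
```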
diff --git a/man/jobsRepairRunAndWait.Rd b/man/jobsRepairRunAndWait.Rd new file mode 100644 index 00000000..7708b997 --- /dev/null +++ b/man/jobsRepairRunAndWait.Rd @@ -0,0 +1,72 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{jobsRepairRunAndWait} +\alias{jobsRepairRunAndWait} +\title{Repair a job run.} +\usage{ +jobsRepairRunAndWait( + client, + run_id, + dbt_commands = NULL, + jar_params = NULL, + job_parameters = NULL, + latest_repair_id = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + rerun_all_failed_tasks = NULL, + rerun_dependent_tasks = NULL, + rerun_tasks = NULL, + spark_submit_params = NULL, + sql_params = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The job run ID of the run to repair.} + +\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} + +\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} + +\item{job_parameters}{Job-level parameters used in the run.} + +\item{latest_repair_id}{The ID of the latest repair.} + +\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{pipeline_params}{This field has no description yet.} + +\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} + +\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} + +\item{rerun_all_failed_tasks}{If true, repair all failed tasks.} + +\item{rerun_dependent_tasks}{If true, repair all tasks that depend on the tasks in \code{rerun_tasks}, even if they were previously successful.} + +\item{rerun_tasks}{The task keys of the task runs to repair.} + +\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} + +\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Re-run one or more tasks. Tasks are re-run as part of the original job run. +They use the current job and task settings, and can be viewed in the history +for the original job run. 
+} diff --git a/man/jobsRunNow.Rd b/man/jobsRunNow.Rd index ce4277b0..dfbe9df2 100644 --- a/man/jobsRunNow.Rd +++ b/man/jobsRunNow.Rd @@ -17,9 +17,7 @@ jobsRunNow( python_params = NULL, queue = NULL, spark_submit_params = NULL, - sql_params = NULL, - timeout = 20, - callback = cli_reporter + sql_params = NULL ) } \arguments{ @@ -48,17 +46,7 @@ jobsRunNow( \item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} \item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Run a job and return the \code{run_id} of the triggered run. } diff --git a/man/jobsRunNowAndWait.Rd b/man/jobsRunNowAndWait.Rd new file mode 100644 index 00000000..3a4131af --- /dev/null +++ b/man/jobsRunNowAndWait.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{jobsRunNowAndWait} +\alias{jobsRunNowAndWait} +\title{Trigger a new job run.} +\usage{ +jobsRunNowAndWait( + client, + job_id, + dbt_commands = NULL, + idempotency_token = NULL, + jar_params = NULL, + job_parameters = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + queue = NULL, + spark_submit_params = NULL, + sql_params = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. 
The ID of the job to be executed.} + +\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} + +\item{idempotency_token}{An optional token to guarantee the idempotency of job run requests.} + +\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} + +\item{job_parameters}{Job-level parameters used in the run.} + +\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{pipeline_params}{This field has no description yet.} + +\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} + +\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} + +\item{queue}{The queue settings of the run.} + +\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} + +\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Run a job and return the \code{run_id} of the triggered run. +} diff --git a/man/jobsSubmit.Rd b/man/jobsSubmit.Rd index 01d2839c..58fa92d8 100644 --- a/man/jobsSubmit.Rd +++ b/man/jobsSubmit.Rd @@ -16,9 +16,7 @@ jobsSubmit( run_name = NULL, tasks = NULL, timeout_seconds = NULL, - webhook_notifications = NULL, - timeout = 20, - callback = cli_reporter + webhook_notifications = NULL ) } \arguments{ @@ -45,18 +43,8 @@ jobsSubmit( \item{timeout_seconds}{An optional timeout applied to each run of this job.} \item{webhook_notifications}{A collection of system notification IDs to notify when the run begins or completes.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job. Runs submitted using this endpoint don’t display in the UI. 
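For comparison with the non-blocking jobsRunNow() above, a sketch of the blocking variant, using a hypothetical job ID and the notebook parameters from the documented example:

client <- DatabricksClient()

# jobsRunNow() now only returns the run_id of the triggered run; the AndWait
# variant below additionally polls until the run reaches TERMINATED or SKIPPED.
run <- jobsRunNowAndWait(
  client,
  job_id = 1234,  # hypothetical job ID
  notebook_params = list(name = "john doe", age = "35")
)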
Use the \code{jobs/runs/get} API to check the run state after the job is diff --git a/man/jobsSubmitAndWait.Rd b/man/jobsSubmitAndWait.Rd new file mode 100644 index 00000000..244c3fd0 --- /dev/null +++ b/man/jobsSubmitAndWait.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{jobsSubmitAndWait} +\alias{jobsSubmitAndWait} +\title{Create and trigger a one-time run.} +\usage{ +jobsSubmitAndWait( + client, + access_control_list = NULL, + email_notifications = NULL, + git_source = NULL, + health = NULL, + idempotency_token = NULL, + notification_settings = NULL, + queue = NULL, + run_name = NULL, + tasks = NULL, + timeout_seconds = NULL, + webhook_notifications = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{access_control_list}{List of permissions to set on the job.} + +\item{email_notifications}{An optional set of email addresses notified when the run begins or completes.} + +\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} + +\item{health}{An optional set of health rules that can be defined for this job.} + +\item{idempotency_token}{An optional token that can be used to guarantee the idempotency of job run requests.} + +\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this run.} + +\item{queue}{The queue settings of the one-time run.} + +\item{run_name}{An optional name for the run.} + +\item{tasks}{This field has no description yet.} + +\item{timeout_seconds}{An optional timeout applied to each run of this job.} + +\item{webhook_notifications}{A collection of system notification IDs to notify when the run begins or completes.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Submit a one-time run. This endpoint allows you to submit a workload directly +without creating a job. Runs submitted using this endpoint don’t display in +the UI. Use the \code{jobs/runs/get} API to check the run state after the job is +submitted. +} diff --git a/man/pipelinesStop.Rd b/man/pipelinesStop.Rd index 25e02641..a1595b35 100644 --- a/man/pipelinesStop.Rd +++ b/man/pipelinesStop.Rd @@ -4,24 +4,14 @@ \alias{pipelinesStop} \title{Stop a pipeline.} \usage{ -pipelinesStop(client, pipeline_id, timeout = 20, callback = cli_reporter) +pipelinesStop(client, pipeline_id) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} \item{pipeline_id}{Required. This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Pipelines on Databricks reach -IDLE state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. 
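A one-time submit that blocks until completion might look like the sketch below. The nested task structure is assumed to mirror the Jobs REST API payload, and the cluster ID and notebook path are hypothetical:

client <- DatabricksClient()

# Submit a one-off notebook run and wait for it to finish.
result <- jobsSubmitAndWait(
  client,
  run_name = "ad-hoc notebook run",
  tasks = list(list(
    task_key = "main",
    existing_cluster_id = "1234-567890-abcde123",   # hypothetical cluster ID
    notebook_task = list(notebook_path = "/Shared/ad_hoc")
  ))
)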
-By default, the state of Databricks Pipelines is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Stops the pipeline by canceling the active update. If there is no active update for the pipeline, this request is a no-op. } diff --git a/man/pipelinesStopAndWait.Rd b/man/pipelinesStopAndWait.Rd new file mode 100644 index 00000000..440d7c5a --- /dev/null +++ b/man/pipelinesStopAndWait.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{pipelinesStopAndWait} +\alias{pipelinesStopAndWait} +\title{Stop a pipeline.} +\usage{ +pipelinesStopAndWait( + client, + pipeline_id, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Pipelines on Databricks reach +IDLE state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Pipelines is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Stops the pipeline by canceling the active update. If there is no active +update for the pipeline, this request is a no-op. +} diff --git a/man/servingEndpointsCreate.Rd b/man/servingEndpointsCreate.Rd index 784b0109..243fec96 100644 --- a/man/servingEndpointsCreate.Rd +++ b/man/servingEndpointsCreate.Rd @@ -4,15 +4,7 @@ \alias{servingEndpointsCreate} \title{Create a new serving endpoint.} \usage{ -servingEndpointsCreate( - client, - name, - config, - rate_limits = NULL, - tags = NULL, - timeout = 20, - callback = cli_reporter -) +servingEndpointsCreate(client, name, config, rate_limits = NULL, tags = NULL) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} @@ -24,14 +16,7 @@ servingEndpointsCreate( \item{rate_limits}{Rate limits to be applied to the serving endpoint.} \item{tags}{Tags to be attached to the serving endpoint and automatically propagated to billing logs.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Serving Endpoints on Databricks reach -NOT_UPDATING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior -by changing the \code{callback} parameter. +Create a new serving endpoint. } diff --git a/man/servingEndpointsCreateAndWait.Rd b/man/servingEndpointsCreateAndWait.Rd new file mode 100644 index 00000000..c9c92574 --- /dev/null +++ b/man/servingEndpointsCreateAndWait.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{servingEndpointsCreateAndWait} +\alias{servingEndpointsCreateAndWait} +\title{Create a new serving endpoint.} +\usage{ +servingEndpointsCreateAndWait( + client, + name, + config, + rate_limits = NULL, + tags = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint.} + +\item{config}{Required. The core config of the serving endpoint.} + +\item{rate_limits}{Rate limits to be applied to the serving endpoint.} + +\item{tags}{Tags to be attached to the serving endpoint and automatically propagated to billing logs.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Serving Endpoints on Databricks reach +NOT_UPDATING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} diff --git a/man/servingEndpointsUpdateConfig.Rd b/man/servingEndpointsUpdateConfig.Rd index 569512b5..ace0bb6d 100644 --- a/man/servingEndpointsUpdateConfig.Rd +++ b/man/servingEndpointsUpdateConfig.Rd @@ -10,9 +10,7 @@ servingEndpointsUpdateConfig( auto_capture_config = NULL, served_entities = NULL, served_models = NULL, - traffic_config = NULL, - timeout = 20, - callback = cli_reporter + traffic_config = NULL ) } \arguments{ @@ -27,18 +25,8 @@ servingEndpointsUpdateConfig( \item{served_models}{(Deprecated, use served_entities instead) A list of served models for the endpoint to serve.} \item{traffic_config}{The traffic config defining how invocations to the serving endpoint should be routed.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Serving Endpoints on Databricks reach -NOT_UPDATING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Updates any combination of the serving endpoint's served entities, the compute configuration of those served entities, and the endpoint's traffic config. An endpoint that already has an update in progress can not be updated diff --git a/man/servingEndpointsUpdateConfigAndWait.Rd b/man/servingEndpointsUpdateConfigAndWait.Rd new file mode 100644 index 00000000..c65abd5e --- /dev/null +++ b/man/servingEndpointsUpdateConfigAndWait.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{servingEndpointsUpdateConfigAndWait} +\alias{servingEndpointsUpdateConfigAndWait} +\title{Update config of a serving endpoint.} +\usage{ +servingEndpointsUpdateConfigAndWait( + client, + name, + auto_capture_config = NULL, + served_entities = NULL, + served_models = NULL, + traffic_config = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. 
The name of the serving endpoint to update.} + +\item{auto_capture_config}{Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.} + +\item{served_entities}{A list of served entities for the endpoint to serve.} + +\item{served_models}{(Deprecated, use served_entities instead) A list of served models for the endpoint to serve.} + +\item{traffic_config}{The traffic config defining how invocations to the serving endpoint should be routed.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Serving Endpoints on Databricks reach +NOT_UPDATING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Updates any combination of the serving endpoint's served entities, the +compute configuration of those served entities, and the endpoint's traffic +config. An endpoint that already has an update in progress can not be updated +until the current update completes or fails. +} diff --git a/man/vectorSearchEndpointsCreateEndpoint.Rd b/man/vectorSearchEndpointsCreateEndpoint.Rd index 7e15f739..6571a128 100644 --- a/man/vectorSearchEndpointsCreateEndpoint.Rd +++ b/man/vectorSearchEndpointsCreateEndpoint.Rd @@ -4,13 +4,7 @@ \alias{vectorSearchEndpointsCreateEndpoint} \title{Create an endpoint.} \usage{ -vectorSearchEndpointsCreateEndpoint( - client, - name, - endpoint_type, - timeout = 20, - callback = cli_reporter -) +vectorSearchEndpointsCreateEndpoint(client, name, endpoint_type) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} @@ -18,17 +12,7 @@ vectorSearchEndpointsCreateEndpoint( \item{name}{Required. Name of endpoint.} \item{endpoint_type}{Required. Type of endpoint.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Vector Search Endpoints on Databricks reach -ONLINE state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Vector Search Endpoints is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Create a new endpoint. } diff --git a/man/vectorSearchEndpointsCreateEndpointAndWait.Rd b/man/vectorSearchEndpointsCreateEndpointAndWait.Rd new file mode 100644 index 00000000..18a266df --- /dev/null +++ b/man/vectorSearchEndpointsCreateEndpointAndWait.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_endpoints.R +\name{vectorSearchEndpointsCreateEndpointAndWait} +\alias{vectorSearchEndpointsCreateEndpointAndWait} +\title{Create an endpoint.} +\usage{ +vectorSearchEndpointsCreateEndpointAndWait( + client, + name, + endpoint_type, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of endpoint.} + +\item{endpoint_type}{Required. 
Type of endpoint.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Vector Search Endpoints on Databricks reach +ONLINE state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Vector Search Endpoints is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Create a new endpoint. +} diff --git a/man/warehousesCreate.Rd b/man/warehousesCreate.Rd index f2a1b78f..48be7805 100644 --- a/man/warehousesCreate.Rd +++ b/man/warehousesCreate.Rd @@ -18,9 +18,7 @@ warehousesCreate( name = NULL, spot_instance_policy = NULL, tags = NULL, - warehouse_type = NULL, - timeout = 20, - callback = cli_reporter + warehouse_type = NULL ) } \arguments{ @@ -51,17 +49,7 @@ warehousesCreate( \item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} \item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Creates a new SQL warehouse. } diff --git a/man/warehousesCreateAndWait.Rd b/man/warehousesCreateAndWait.Rd new file mode 100644 index 00000000..59d4cca8 --- /dev/null +++ b/man/warehousesCreateAndWait.Rd @@ -0,0 +1,67 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{warehousesCreateAndWait} +\alias{warehousesCreateAndWait} +\title{Create a warehouse.} +\usage{ +warehousesCreateAndWait( + client, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} + +\item{channel}{Channel Details.} + +\item{cluster_size}{Size of the clusters allocated for this warehouse.} + +\item{creator_name}{warehouse creator name.} + +\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} + +\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} + +\item{instance_profile_arn}{Deprecated.} + +\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} + +\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} + +\item{name}{Logical name for the cluster.} + +\item{spot_instance_policy}{Configurations whether the warehouse should use spot instances.} + +\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} + +\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Creates a new SQL warehouse. +} diff --git a/man/warehousesEdit.Rd b/man/warehousesEdit.Rd index 7d2f7abd..f1a13846 100644 --- a/man/warehousesEdit.Rd +++ b/man/warehousesEdit.Rd @@ -19,9 +19,7 @@ warehousesEdit( name = NULL, spot_instance_policy = NULL, tags = NULL, - warehouse_type = NULL, - timeout = 20, - callback = cli_reporter + warehouse_type = NULL ) } \arguments{ @@ -54,17 +52,7 @@ warehousesEdit( \item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} \item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Updates the configuration for a SQL warehouse. 
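Creating a warehouse and waiting for it to come up is a single call under the new scheme. A sketch with illustrative sizing values only:

client <- DatabricksClient()

# Create a serverless PRO warehouse and block until it reaches RUNNING
# (or the default 20-minute timeout elapses).
wh <- warehousesCreateAndWait(
  client,
  name = "reporting-warehouse",   # hypothetical name
  cluster_size = "2X-Small",
  warehouse_type = "PRO",
  enable_serverless_compute = TRUE,
  auto_stop_mins = 30
)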
} diff --git a/man/warehousesEditAndWait.Rd b/man/warehousesEditAndWait.Rd new file mode 100644 index 00000000..69952810 --- /dev/null +++ b/man/warehousesEditAndWait.Rd @@ -0,0 +1,70 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{warehousesEditAndWait} +\alias{warehousesEditAndWait} +\title{Update a warehouse.} +\usage{ +warehousesEditAndWait( + client, + id, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Required.} + +\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} + +\item{channel}{Channel Details.} + +\item{cluster_size}{Size of the clusters allocated for this warehouse.} + +\item{creator_name}{warehouse creator name.} + +\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} + +\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} + +\item{instance_profile_arn}{Deprecated.} + +\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} + +\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} + +\item{name}{Logical name for the cluster.} + +\item{spot_instance_policy}{Configurations whether the warehouse should use spot instances.} + +\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} + +\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Updates the configuration for a SQL warehouse. +} diff --git a/man/warehousesStart.Rd b/man/warehousesStart.Rd index 50af94d9..0fa6f51e 100644 --- a/man/warehousesStart.Rd +++ b/man/warehousesStart.Rd @@ -4,23 +4,13 @@ \alias{warehousesStart} \title{Start a warehouse.} \usage{ -warehousesStart(client, id, timeout = 20, callback = cli_reporter) +warehousesStart(client, id) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} \item{id}{Required. Required.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to console. 
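Edits follow the same pattern: resize an existing warehouse and block until it is RUNNING again. The warehouse ID below is hypothetical:

client <- DatabricksClient()

# Scale up an existing warehouse and wait until the edit is applied.
warehousesEditAndWait(
  client,
  id = "abcdef1234567890",   # hypothetical warehouse ID
  cluster_size = "Small",
  max_num_clusters = 4
)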
You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Starts a SQL warehouse. } diff --git a/man/warehousesStartAndWait.Rd b/man/warehousesStartAndWait.Rd new file mode 100644 index 00000000..3fa813ec --- /dev/null +++ b/man/warehousesStartAndWait.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{warehousesStartAndWait} +\alias{warehousesStartAndWait} +\title{Start a warehouse.} +\usage{ +warehousesStartAndWait(client, id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Required.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Starts a SQL warehouse. +} diff --git a/man/warehousesStop.Rd b/man/warehousesStop.Rd index c29a212f..8760ad17 100644 --- a/man/warehousesStop.Rd +++ b/man/warehousesStop.Rd @@ -4,23 +4,13 @@ \alias{warehousesStop} \title{Stop a warehouse.} \usage{ -warehousesStop(client, id, timeout = 20, callback = cli_reporter) +warehousesStop(client, id) } \arguments{ \item{client}{Required. Instance of DatabricksClient()} \item{id}{Required. Required.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} } \description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -STOPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ Stops a SQL warehouse. } diff --git a/man/warehousesStopAndWait.Rd b/man/warehousesStopAndWait.Rd new file mode 100644 index 00000000..8b0b6f0e --- /dev/null +++ b/man/warehousesStopAndWait.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{warehousesStopAndWait} +\alias{warehousesStopAndWait} +\title{Stop a warehouse.} +\usage{ +warehousesStopAndWait(client, id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Required.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +STOPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Stops a SQL warehouse. +}
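Taken together, the shape of this breaking change is uniform across services: the plain verb now returns as soon as the API accepts the request, and its AndWait twin carries the old blocking behaviour. A before/after sketch for stopping a warehouse, with a hypothetical ID:

client <- DatabricksClient()
id <- "abcdef1234567890"   # hypothetical warehouse ID

# Previously, warehousesStop() itself polled until the STOPPED state.
warehousesStop(client, id)                       # now fire-and-forget
warehousesStopAndWait(client, id, timeout = 10)  # blocks until STOPPED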