-
Notifications
You must be signed in to change notification settings - Fork 37
/
interface.go
executable file
·590 lines (521 loc) · 24.3 KB
/
interface.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package compute
import (
"context"
)
// Cluster policy limits the ability to configure clusters based on a set of
// rules. The policy rules limit the attributes or attribute values available
// for cluster creation. Cluster policies have ACLs that limit their use to
// specific users and groups.
//
// Cluster policies let you limit users to create clusters with prescribed
// settings, simplify the user interface and enable more users to create their
// own clusters (by fixing and hiding some values), control cost by limiting per
// cluster maximum cost (by setting limits on attributes whose values contribute
// to hourly price).
//
// Cluster policy permissions limit which policies a user can select in the
// Policy drop-down when the user creates a cluster: - A user who has cluster
// create permission can select the Unrestricted policy and create
// fully-configurable clusters. - A user who has both cluster create permission
// and access to cluster policies can select the Unrestricted policy and
// policies they have access to. - A user that has access to only cluster
// policies, can select the policies they have access to.
//
// If no policies have been created in the workspace, the Policy drop-down does
// not display.
//
// Only admin users can create, edit, and delete policies. Admin users also have
// access to all policies.
type ClusterPoliciesService interface {
// Create a new policy.
//
// Creates a new policy with prescribed settings.
Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error)
// Delete a cluster policy.
//
// Delete a policy for a cluster. Clusters governed by this policy can still
// run, but cannot be edited.
Delete(ctx context.Context, request DeletePolicy) error
// Update a cluster policy.
//
// Update an existing policy for a cluster. This operation may make some
// clusters governed by the previous policy invalid.
Edit(ctx context.Context, request EditPolicy) error
// Get a cluster policy.
//
// Get a cluster policy entity. Creation and editing is available to admins
// only.
Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error)
// Get cluster policy permission levels.
//
// Gets the permission levels that a user can have on an object.
GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error)
// Get cluster policy permissions.
//
// Gets the permissions of a cluster policy. Cluster policies can inherit
// permissions from their root object.
GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)
// List cluster policies.
//
// Returns a list of policies accessible by the requesting user.
//
// Use ListAll() to get all Policy instances
List(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error)
// Set cluster policy permissions.
//
// Sets permissions on a cluster policy. Cluster policies can inherit
// permissions from their root object.
SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)
// Update cluster policy permissions.
//
// Updates the permissions on a cluster policy. Cluster policies can inherit
// permissions from their root object.
UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)
}
// The Clusters API allows you to create, start, edit, list, terminate, and
// delete clusters.
//
// Databricks maps cluster node instance types to compute units known as DBUs.
// See the instance type pricing page for a list of the supported instance types
// and their corresponding DBUs.
//
// A Databricks cluster is a set of computation resources and configurations on
// which you run data engineering, data science, and data analytics workloads,
// such as production ETL pipelines, streaming analytics, ad-hoc analytics, and
// machine learning.
//
// You run these workloads as a set of commands in a notebook or as an automated
// job. Databricks makes a distinction between all-purpose clusters and job
// clusters. You use all-purpose clusters to analyze data collaboratively using
// interactive notebooks. You use job clusters to run fast and robust automated
// jobs.
//
// You can create an all-purpose cluster using the UI, CLI, or REST API. You can
// manually terminate and restart an all-purpose cluster. Multiple users can
// share such clusters to do collaborative interactive analysis.
//
// IMPORTANT: Databricks retains cluster configuration information for up to 200
// all-purpose clusters terminated in the last 30 days and up to 30 job clusters
// recently terminated by the job scheduler. To keep an all-purpose cluster
// configuration even after it has been terminated for more than 30 days, an
// administrator can pin a cluster to the cluster list.
type ClustersService interface {
// Change cluster owner.
//
// Change the owner of the cluster. You must be an admin to perform this
// operation.
ChangeOwner(ctx context.Context, request ChangeClusterOwner) error
// Create new cluster.
//
// Creates a new Spark cluster. This method will acquire new instances from
// the cloud provider if necessary. Note: Databricks may not be able to
// acquire some of the requested nodes, due to cloud provider limitations
// (account limits, spot price, etc.) or transient network issues.
//
// If Databricks acquires at least 85% of the requested on-demand nodes,
// cluster creation will succeed. Otherwise the cluster will terminate with
// an informative error message.
Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)
// Terminate cluster.
//
// Terminates the Spark cluster with the specified ID. The cluster is
// removed asynchronously. Once the termination has completed, the cluster
// will be in a `TERMINATED` state. If the cluster is already in a
// `TERMINATING` or `TERMINATED` state, nothing will happen.
Delete(ctx context.Context, request DeleteCluster) error
// Update cluster configuration.
//
// Updates the configuration of a cluster to match the provided attributes
// and size. A cluster can be updated if it is in a `RUNNING` or
// `TERMINATED` state.
//
// If a cluster is updated while in a `RUNNING` state, it will be restarted
// so that the new attributes can take effect.
//
// If a cluster is updated while in a `TERMINATED` state, it will remain
// `TERMINATED`. The next time it is started using the `clusters/start` API,
// the new attributes will take effect. Any attempt to update a cluster in
// any other state will be rejected with an `INVALID_STATE` error code.
//
// Clusters created by the Databricks Jobs service cannot be edited.
Edit(ctx context.Context, request EditCluster) error
// List cluster activity events.
//
// Retrieves a list of events about the activity of a cluster. This API is
// paginated. If there are more events to read, the response includes all
// the parameters necessary to request the next page of events.
//
// Use EventsAll() to get all ClusterEvent instances, which will iterate over every result page.
Events(ctx context.Context, request GetEvents) (*GetEventsResponse, error)
// Get cluster info.
//
// Retrieves the information for a cluster given its identifier. Clusters
// can be described while they are running, or up to 60 days after they are
// terminated.
Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)
// Get cluster permission levels.
//
// Gets the permission levels that a user can have on an object.
GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error)
// Get cluster permissions.
//
// Gets the permissions of a cluster. Clusters can inherit permissions from
// their root object.
GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error)
// List all clusters.
//
// Return information about all pinned clusters, active clusters, up to 200
// of the most recently terminated all-purpose clusters in the past 30 days,
// and up to 30 of the most recently terminated job clusters in the past 30
// days.
//
// For example, if there is 1 pinned cluster, 4 active clusters, 45
// terminated all-purpose clusters in the past 30 days, and 50 terminated
// job clusters in the past 30 days, then this API returns the 1 pinned
// cluster, 4 active clusters, all 45 terminated all-purpose clusters, and
// the 30 most recently terminated job clusters.
//
// Use ListAll() to get all ClusterDetails instances
List(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error)
// List node types.
//
// Returns a list of supported Spark node types. These node types can be
// used to launch a cluster.
ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error)
// List availability zones.
//
// Returns a list of availability zones where clusters can be created in
// (For example, us-west-2a). These zones can be used to launch a cluster.
ListZones(ctx context.Context) (*ListAvailableZonesResponse, error)
// Permanently delete cluster.
//
// Permanently deletes a Spark cluster. This cluster is terminated and
// resources are asynchronously removed.
//
// In addition, users will no longer see permanently deleted clusters in the
// cluster list, and API users can no longer perform any action on
// permanently deleted clusters.
PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error
// Pin cluster.
//
// Pinning a cluster ensures that the cluster will always be returned by the
// ListClusters API. Pinning a cluster that is already pinned will have no
// effect. This API can only be called by workspace admins.
Pin(ctx context.Context, request PinCluster) error
// Resize cluster.
//
// Resizes a cluster to have a desired number of workers. This will fail
// unless the cluster is in a `RUNNING` state.
Resize(ctx context.Context, request ResizeCluster) error
// Restart cluster.
//
// Restarts a Spark cluster with the supplied ID. If the cluster is not
// currently in a `RUNNING` state, nothing will happen.
Restart(ctx context.Context, request RestartCluster) error
// Set cluster permissions.
//
// Sets permissions on a cluster. Clusters can inherit permissions from
// their root object.
SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)
// List available Spark versions.
//
// Returns the list of available Spark versions. These versions can be used
// to launch a cluster.
SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error)
// Start terminated cluster.
//
// Starts a terminated Spark cluster with the supplied ID. This works
// similar to `createCluster` except:
//
// * The previous cluster id and attributes are preserved. * The cluster
// starts with the last specified cluster size. * If the previous cluster
// was an autoscaling cluster, the current cluster starts with the minimum
// number of nodes. * If the cluster is not currently in a `TERMINATED`
// state, nothing will happen. * Clusters launched to run a job cannot be
// started.
Start(ctx context.Context, request StartCluster) error
// Unpin cluster.
//
// Unpinning a cluster will allow the cluster to eventually be removed from
// the ListClusters API. Unpinning a cluster that is not pinned will have no
// effect. This API can only be called by workspace admins.
Unpin(ctx context.Context, request UnpinCluster) error
// Update cluster permissions.
//
// Updates the permissions on a cluster. Clusters can inherit permissions
// from their root object.
UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)
}
// This API allows execution of Python, Scala, SQL, or R commands on running
// Databricks Clusters.
type CommandExecutionService interface {
// Cancel a command.
//
// Cancels a currently running command within an execution context.
//
// The command ID is obtained from a prior successful call to __execute__.
Cancel(ctx context.Context, request CancelCommand) error
// Get command info.
//
// Gets the status of and, if available, the results from a currently
// executing command.
//
// The command ID is obtained from a prior successful call to __execute__.
CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error)
// Get context status.
//
// Gets the status for an execution context.
ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error)
// Create an execution context.
//
// Creates an execution context for running cluster commands.
//
// If successful, this method returns the ID of the new execution context.
Create(ctx context.Context, request CreateContext) (*Created, error)
// Delete an execution context.
//
// Deletes an execution context.
Destroy(ctx context.Context, request DestroyContext) error
// Run a command.
//
// Runs a cluster command in the given execution context, using the provided
// language.
//
// If successful, it returns an ID for tracking the status of the command's
// execution.
Execute(ctx context.Context, request Command) (*Created, error)
}
// The Global Init Scripts API enables Workspace administrators to configure
// global initialization scripts for their workspace. These scripts run on every
// node in every cluster in the workspace.
//
// **Important:** Existing clusters must be restarted to pick up any changes
// made to global init scripts. Global init scripts are run in order. If the
// init script returns with a bad exit code, the Apache Spark container fails to
// launch and init scripts with later position are skipped. If enough containers
// fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` error
// code.
type GlobalInitScriptsService interface {
// Create init script.
//
// Creates a new global init script in this workspace.
Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error)
// Delete init script.
//
// Deletes a global init script.
Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error
// Get an init script.
//
// Gets all the details of a script, including its Base64-encoded contents.
Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error)
// List init scripts.
//
// Get a list of all global init scripts for this workspace. This returns
// all properties for each script but **not** the script contents. To
// retrieve the contents of a script, use the [get a global init
// script](#operation/get-script) operation.
//
// Use ListAll() to get all GlobalInitScriptDetails instances
List(ctx context.Context) (*ListGlobalInitScriptsResponse, error)
// Update init script.
//
// Updates a global init script, specifying only the fields to change. All
// fields are optional. Unspecified fields retain their current value.
Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error
}
// Instance Pools API are used to create, edit, delete and list instance pools
// by using ready-to-use cloud instances, which reduces cluster start and
// auto-scaling times.
//
// Databricks pools reduce cluster start and auto-scaling times by maintaining a
// set of idle, ready-to-use instances. When a cluster is attached to a pool,
// cluster nodes are created using the pool’s idle instances. If the pool has
// no idle instances, the pool expands by allocating a new instance from the
// instance provider in order to accommodate the cluster’s request. When a
// cluster releases an instance, it returns to the pool and is free for another
// cluster to use. Only clusters attached to a pool can use that pool’s idle
// instances.
//
// You can specify a different pool for the driver node and worker nodes, or use
// the same pool for both.
//
// Databricks does not charge DBUs while instances are idle in the pool.
// Instance provider billing does apply. See pricing.
type InstancePoolsService interface {
// Create a new instance pool.
//
// Creates a new instance pool using idle and ready-to-use cloud instances.
Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error)
// Delete an instance pool.
//
// Deletes the instance pool permanently. The idle instances in the pool are
// terminated asynchronously.
Delete(ctx context.Context, request DeleteInstancePool) error
// Edit an existing instance pool.
//
// Modifies the configuration of an existing instance pool.
Edit(ctx context.Context, request EditInstancePool) error
// Get instance pool information.
//
// Retrieve the information for an instance pool based on its identifier.
Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error)
// Get instance pool permission levels.
//
// Gets the permission levels that a user can have on an object.
GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error)
// Get instance pool permissions.
//
// Gets the permissions of an instance pool. Instance pools can inherit
// permissions from their root object.
GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error)
// List instance pool info.
//
// Gets a list of instance pools with their statistics.
//
// Use ListAll() to get all InstancePoolAndStats instances
List(ctx context.Context) (*ListInstancePools, error)
// Set instance pool permissions.
//
// Sets permissions on an instance pool. Instance pools can inherit
// permissions from their root object.
SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)
// Update instance pool permissions.
//
// Updates the permissions on an instance pool. Instance pools can inherit
// permissions from their root object.
UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)
}
// The Instance Profiles API allows admins to add, list, and remove instance
// profiles that users can launch clusters with. Regular users can list the
// instance profiles available to them. See [Secure access to S3 buckets using
// instance profiles] for more information.
//
// [Secure access to S3 buckets using instance profiles]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html
type InstanceProfilesService interface {
// Register an instance profile.
//
// In the UI, you can select the instance profile when launching clusters.
// This API is only available to admin users.
Add(ctx context.Context, request AddInstanceProfile) error
// Edit an instance profile.
//
// The only supported field to change is the optional IAM role ARN
// associated with the instance profile. It is required to specify the IAM
// role ARN if both of the following are true:
//
// * Your role name and instance profile name do not match. The name is the
// part after the last slash in each ARN. * You want to use the instance
// profile with [Databricks SQL Serverless].
//
// To understand where these fields are in the AWS console, see [Enable
// serverless SQL warehouses].
//
// This API is only available to admin users.
//
// [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
// [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html
Edit(ctx context.Context, request InstanceProfile) error
// List available instance profiles.
//
// List the instance profiles that the calling user can use to launch a
// cluster.
//
// This API is available to all users.
//
// Use ListAll() to get all InstanceProfile instances
List(ctx context.Context) (*ListInstanceProfilesResponse, error)
// Remove the instance profile.
//
// Remove the instance profile with the provided ARN. Existing clusters with
// this instance profile will continue to function.
//
// This API is only accessible to admin users.
Remove(ctx context.Context, request RemoveInstanceProfile) error
}
// The Libraries API allows you to install and uninstall libraries and get the
// status of libraries on a cluster.
//
// To make third-party or custom code available to notebooks and jobs running on
// your clusters, you can install a library. Libraries can be written in Python,
// Java, Scala, and R. You can upload Java, Scala, and Python libraries and
// point to external packages in PyPI, Maven, and CRAN repositories.
//
// Cluster libraries can be used by all notebooks running on a cluster. You can
// install a cluster library directly from a public repository such as PyPI or
// Maven, using a previously installed workspace library, or using an init
// script.
//
// When you install a library on a cluster, a notebook already attached to that
// cluster will not immediately see the new library. You must first detach and
// then reattach the notebook to the cluster.
//
// When you uninstall a library from a cluster, the library is removed only when
// you restart the cluster. Until you restart the cluster, the status of the
// uninstalled library appears as Uninstall pending restart.
type LibrariesService interface {
// Get all statuses.
//
// Get the status of all libraries on all clusters. A status will be
// available for all libraries installed on this cluster via the API or the
// libraries UI as well as libraries set to be installed on all clusters via
// the libraries UI.
AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error)
// Get status.
//
// Get the status of libraries on a cluster. A status will be available for
// all libraries installed on this cluster via the API or the libraries UI
// as well as libraries set to be installed on all clusters via the
// libraries UI. The order of returned libraries will be as follows.
//
// 1. Libraries set to be installed on this cluster will be returned first.
// Within this group, the final order will be order in which the libraries
// were added to the cluster.
//
// 2. Libraries set to be installed on all clusters are returned next.
// Within this group there is no order guarantee.
//
// 3. Libraries that were previously requested on this cluster or on all
// clusters, but now marked for removal. Within this group there is no order
// guarantee.
//
// Use ClusterStatusAll() to get all LibraryFullStatus instances
ClusterStatus(ctx context.Context, request ClusterStatusRequest) (*ClusterLibraryStatuses, error)
// Add libraries.
//
// Add libraries to be installed on a cluster. The installation is
// asynchronous; it happens in the background after the completion of this
// request.
//
// **Note**: The actual set of libraries to be installed on a cluster is the
// union of the libraries specified via this method and the libraries set to
// be installed on all clusters via the libraries UI.
Install(ctx context.Context, request InstallLibraries) error
// Uninstall libraries.
//
// Set libraries to be uninstalled on a cluster. The libraries won't be
// uninstalled until the cluster is restarted. Uninstalling libraries that
// are not installed on the cluster will have no impact but is not an error.
Uninstall(ctx context.Context, request UninstallLibraries) error
}
// View available policy families. A policy family contains a policy definition
// providing best practices for configuring clusters for a particular use case.
//
// Databricks manages and provides policy families for several common cluster
// use cases. You cannot create, edit, or delete policy families.
//
// Policy families cannot be used directly to create clusters. Instead, you
// create cluster policies using a policy family. Cluster policies created using
// a policy family inherit the policy family's policy definition.
type PolicyFamiliesService interface {
// Get policy family information.
//
// Retrieve the information for a policy family based on its identifier.
Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error)
// List policy families.
//
// Retrieve a list of policy families. This API is paginated.
//
// Use ListAll() to get all PolicyFamily instances, which will iterate over every result page.
List(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error)
}