diff --git a/client/model/cluster.go b/client/model/cluster.go index beca46830..ba3636e7e 100644 --- a/client/model/cluster.go +++ b/client/model/cluster.go @@ -1,82 +1,84 @@ package model -import "errors" - +// AutoScale is a struct that describes auto scaling for clusters type AutoScale struct { MinWorkers int32 `json:"min_workers,omitempty"` MaxWorkers int32 `json:"max_workers,omitempty"` } +// AwsAvailability is a type for describing AWS availability on cluster nodes type AwsAvailability string const ( + // AwsAvailabilitySpot is the spot instance type for clusters AwsAvailabilitySpot = "SPOT" + // AwsAvailabilityOnDemand is the on-demand instance type for clusters AwsAvailabilityOnDemand = "ON_DEMAND" + // AwsAvailabilitySpotWithFallback is the spot instance type for clusters with the option + // to fall back to on-demand if the instance cannot be acquired AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK" ) -func GetAwsAvailability(val string) (AwsAvailability, error) { - switch val { - case "SPOT": - return AwsAvailabilitySpot, nil - case "ON_DEMAND": - return AwsAvailabilityOnDemand, nil - case "SPOT_WITH_FALLBACK": - return AwsAvailabilitySpotWithFallback, nil - } - return "", errors.New("No Match!") -} - +// AzureDiskVolumeType is the disk type on Azure VMs type AzureDiskVolumeType string const ( + // AzureDiskVolumeTypeStandard is for standard locally redundant storage AzureDiskVolumeTypeStandard = "STANDARD_LRS" + // AzureDiskVolumeTypePremium is for premium locally redundant storage AzureDiskVolumeTypePremium = "PREMIUM_LRS" ) -func GetAzureDiskVolumeType(val string) (AzureDiskVolumeType, error) { - switch val { - case "STANDARD_LRS": - return AzureDiskVolumeTypeStandard, nil - case "PREMIUM_LRS": - return AzureDiskVolumeTypePremium, nil - } - return "", errors.New("No Match!") -} - +// EbsVolumeType is the disk type on AWS VMs type EbsVolumeType string const ( + // EbsVolumeTypeGeneralPurposeSsd is general purpose SSD (starts at 32 GB) EbsVolumeTypeGeneralPurposeSsd = "GENERAL_PURPOSE_SSD" + // EbsVolumeTypeThroughputOptimizedHdd is throughput optimized HDD (starts at 500 GB) EbsVolumeTypeThroughputOptimizedHdd = "THROUGHPUT_OPTIMIZED_HDD" ) -func GetEbsVolumeType(val string) (EbsVolumeType, error) { - switch val { - case "GENERAL_PURPOSE_SSD": - return EbsVolumeTypeGeneralPurposeSsd, nil - case "THROUGHPUT_OPTIMIZED_HDD": - return EbsVolumeTypeThroughputOptimizedHdd, nil - } - return "", errors.New("No Match!") -}
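The three Get* lookup helpers above are removed without an in-package replacement. A caller that still needs to validate a raw availability string can inline the check against the exported constants; this is a minimal sketch under assumed names (the toAwsAvailability helper and the module import path are illustrative, not part of this patch):

```go
package main

import (
	"fmt"

	// Import path assumed for illustration; substitute the module's real path.
	"github.com/databrickslabs/databricks-terraform/client/model"
)

// toAwsAvailability is a hypothetical stand-in for the removed
// GetAwsAvailability helper: it validates a raw string against the
// exported constants and returns a typed value.
func toAwsAvailability(v string) (model.AwsAvailability, error) {
	switch a := model.AwsAvailability(v); a {
	case model.AwsAvailabilitySpot, model.AwsAvailabilityOnDemand, model.AwsAvailabilitySpotWithFallback:
		return a, nil
	}
	return "", fmt.Errorf("unknown AWS availability %q", v)
}

func main() {
	a, err := toAwsAvailability("SPOT_WITH_FALLBACK")
	fmt.Println(a, err) // SPOT_WITH_FALLBACK <nil>
}
```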
+// ClusterState is for describing possible cluster states type ClusterState string const ( + // ClusterStatePending is for PENDING state ClusterStatePending = "PENDING" + + // ClusterStateRunning is for RUNNING state ClusterStateRunning = "RUNNING" + + // ClusterStateRestarting is for RESTARTING state ClusterStateRestarting = "RESTARTING" + + // ClusterStateResizing is for RESIZING state ClusterStateResizing = "RESIZING" + + // ClusterStateTerminating is for TERMINATING state ClusterStateTerminating = "TERMINATING" + + // ClusterStateTerminated is for TERMINATED state ClusterStateTerminated = "TERMINATED" + + // ClusterStateError is for ERROR state ClusterStateError = "ERROR" + + // ClusterStateUnknown is for UNKNOWN state ClusterStateUnknown = "UNKNOWN" ) +// ClusterStateNonRunnable is a list of states in which the cluster cannot go back to running by itself +// without user intervention var ClusterStateNonRunnable = []ClusterState{ClusterStateTerminating, ClusterStateTerminated, ClusterStateError, ClusterStateUnknown} + +// ClusterStateNonTerminating is a list of states in which the cluster cannot go back to terminated by itself +// without user intervention var ClusterStateNonTerminating = []ClusterState{ClusterStatePending, ClusterStateRunning, ClusterStateRestarting, ClusterStateResizing, ClusterStateUnknown} +// ContainsClusterState returns true if the given search state is contained in the +// given set of cluster states func ContainsClusterState(clusterStates []ClusterState, searchState ClusterState) bool { for _, state := range clusterStates { if state == searchState { @@ -86,11 +88,13 @@ func ContainsClusterState(clusterStates []ClusterState, searchState ClusterState) return false }
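For reference, a minimal sketch of how the state lists and ContainsClusterState compose in calling code (the canRestart wrapper is illustrative, not part of the patch):

```go
// canRestart reports whether a cluster in the given state can still
// return to running without user intervention.
func canRestart(state model.ClusterState) bool {
	return !model.ContainsClusterState(model.ClusterStateNonRunnable, state)
}
```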
+// ZonesInfo encapsulates the zone information from the zones API call type ZonesInfo struct { Zones []string `json:"zones,omitempty"` DefaultZone string `json:"default_zone,omitempty"` } +// AwsAttributes encapsulates the AWS attributes for AWS-based clusters type AwsAttributes struct { FirstOnDemand int32 `json:"first_on_demand,omitempty"` Availability AwsAvailability `json:"availability,omitempty"` @@ -102,10 +106,12 @@ type AwsAttributes struct { EbsVolumeSize int32 `json:"ebs_volume_size,omitempty"` } +// DbfsStorageInfo contains the destination string for DBFS type DbfsStorageInfo struct { Destination string `json:"destination,omitempty"` } +// S3StorageInfo contains the information for storing files in S3 type S3StorageInfo struct { Destination string `json:"destination,omitempty"` Region string `json:"region,omitempty"` @@ -116,15 +122,18 @@ type S3StorageInfo struct { CannedACL string `json:"canned_acl,omitempty"` } +// StorageInfo contains either DBFS or S3 storage information, depending on which one is relevant. type StorageInfo struct { Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` S3 *S3StorageInfo `json:"s3,omitempty"` } +// SparkNodeAwsAttributes is the struct that indicates whether the node is a spot instance type SparkNodeAwsAttributes struct { IsSpot bool `json:"is_spot,omitempty"` } +// SparkNode encapsulates all the attributes of a node that is part of a Databricks cluster type SparkNode struct { PrivateIP string `json:"private_ip,omitempty"` PublicDNS string `json:"public_dns,omitempty"` @@ -135,22 +144,26 @@ type SparkNode struct { HostPrivateIP string `json:"host_private_ip,omitempty"` } +// TerminationReason encapsulates the termination code and potential parameters type TerminationReason struct { Code string `json:"code,omitempty"` Parameters map[string]string `json:"parameters,omitempty"` } +// LogSyncStatus encapsulates when the cluster logs were last delivered. type LogSyncStatus struct { LastAttempted int64 `json:"last_attempted,omitempty"` LastException string `json:"last_exception,omitempty"` } +// ClusterCloudProviderNodeInfo encapsulates the existing quota available from the cloud service provider. type ClusterCloudProviderNodeInfo struct { Status []string `json:"status,omitempty"` AvailableCoreQuota float32 `json:"available_core_quota,omitempty"` TotalCoreQuota float32 `json:"total_core_quota,omitempty"` } +// NodeType encapsulates information about a given node when using the list-node-types API type NodeType struct { NodeTypeID string `json:"node_type_id,omitempty"` MemoryMb int32 `json:"memory_mb,omitempty"` @@ -161,25 +174,28 @@ type NodeType struct { NodeInfo *ClusterCloudProviderNodeInfo `json:"node_info,omitempty"` } +// DockerBasicAuth contains the auth information when fetching containers type DockerBasicAuth struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` } +// DockerImage contains the image URL and the auth for DCS (Databricks Container Services) type DockerImage struct { - Url string `json:"url,omitempty"` + URL string `json:"url,omitempty"` BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"` } +// Cluster contains the information for submitting API calls to create or edit a cluster type Cluster struct { - ClusterId string `json:"cluster_id,omitempty"` - NumWorkers int32 `json:"num_workers,omitempty"` - Autoscale *AutoScale `json:"autoscale,omitempty"` - ClusterName string `json:"cluster_name,omitempty"` - SparkVersion string `json:"spark_version,omitempty"` - SparkConf map[string]string `json:"spark_conf,omitempty"` - AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` - NodeTypeID string `json:"node_type_id,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + NumWorkers int32 `json:"num_workers,omitempty"` + Autoscale *AutoScale `json:"autoscale,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + SparkVersion string `json:"spark_version,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + NodeTypeID string `json:"node_type_id,omitempty"` DriverNodeTypeID string `json:"driver_node_type_id,omitempty"` SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -189,10 +205,11 @@ type Cluster struct { SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` - InstancePoolId string `json:"instance_pool_id,omitempty"` + InstancePoolID string `json:"instance_pool_id,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` }
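The Id→ID renames here (and throughout the patch) touch only Go identifiers; the JSON tags keep the wire names, so serialized payloads are unchanged. A hedged sketch of a caller after the rename (field values are illustrative, echoing the tests later in this patch):

```go
// A cluster request body under the renamed fields; the emitted JSON is
// identical to what the old ClusterId/InstancePoolId fields produced.
var cluster = model.Cluster{
	ClusterID:      "my-cluster-id", // was ClusterId
	NumWorkers:     2,
	SparkVersion:   "6.3.x-scala2.11",
	NodeTypeID:     "Standard_DS3_v2",
	InstancePoolID: "0101-120000-brick1-pool-ABCD1234", // was InstancePoolId
}
```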
+// ClusterInfo contains the information returned when fetching cluster info with a GET request. type ClusterInfo struct { NumWorkers int32 `json:"num_workers,omitempty"` AutoScale *AutoScale `json:"autoscale,omitempty"` @@ -208,20 +225,20 @@ type ClusterInfo struct { AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` NodeTypeID string `json:"node_type_id,omitempty"` DriverNodeTypeID string `json:"driver_node_type_id,omitempty"` - SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` - CustomTags map[string]string `json:"custom_tags,omitempty"` - ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` - InitScripts []StorageInfo `json:"init_scripts,omitempty"` - SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` - AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` - EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` - InstancePoolId string `json:"instance_pool_id,omitempty"` - ClusterSource AwsAvailability `json:"cluster_source,omitempty"` - DockerImage *DockerImage `json:"docker_image,omitempty"` - State ClusterState `json:"state,omitempty"` - StateMessage string `json:"state_message,omitempty"` - StartTime int64 `json:"start_time,omitempty"` - TerminateTime int64 `json:"terminate_time,omitempty"` + SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` + InitScripts []StorageInfo `json:"init_scripts,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + InstancePoolID string `json:"instance_pool_id,omitempty"` + ClusterSource AwsAvailability `json:"cluster_source,omitempty"` + DockerImage *DockerImage `json:"docker_image,omitempty"` + State ClusterState `json:"state,omitempty"` + StateMessage string `json:"state_message,omitempty"` + StartTime int64 `json:"start_time,omitempty"` + TerminateTime int64 `json:"terminate_time,omitempty"` LastStateLossTime int64 `json:"last_state_loss_time,omitempty"` LastActivityTime int64 `json:"last_activity_time,omitempty"` ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"` diff --git a/client/model/command.go b/client/model/command.go index c990b14a0..5420c32cd 100644 --- a/client/model/command.go +++ b/client/model/command.go @@ -1,5 +1,6 @@ package model +// CommandResults is the output when the command finishes in API 1.2 type CommandResults struct { ResultType string `json:"resultType,omitempty"` Summary string `json:"summary,omitempty"` @@ -7,17 +8,12 @@ type CommandResults struct { Data interface{} `json:"data,omitempty"` Schema interface{} `json:"schema,omitempty"` Truncated bool `json:"truncated,omitempty"` - IsJsonSchema bool `json:"isJsonSchema,omitempty"` + IsJSONSchema bool `json:"isJsonSchema,omitempty"` } +// Command is the struct that contains what the 1.2 commands API returns type Command struct { ID string `json:"id,omitempty"` Status string `json:"status,omitempty"` Results *CommandResults `json:"results,omitempty"` } - -type ExecutionContext struct { - ContextId string `json:"contextId,omitempty"` - ClusterId string `json:"clusterId,omitempty"` - Language Language `json:"language,omitempty"` -}
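The IsJsonSchema→IsJSONSchema rename keeps the isJsonSchema struct tag, so responses decode exactly as before; a quick stdlib sketch (the sample payload is invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	// Import path assumed for illustration; substitute the module's real path.
	"github.com/databrickslabs/databricks-terraform/client/model"
)

func main() {
	// The wire key is still isJsonSchema even though the Go field is
	// now IsJSONSchema.
	payload := []byte(`{"resultType":"text","truncated":false,"isJsonSchema":true}`)
	var res model.CommandResults
	if err := json.Unmarshal(payload, &res); err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.ResultType, res.IsJSONSchema) // text true
}
```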
diff --git a/client/model/dbfs.go b/client/model/dbfs.go index fb69a0e9c..36ac984e4 100644 --- a/client/model/dbfs.go +++ b/client/model/dbfs.go @@ -1,5 +1,6 @@ package model +// FileInfo contains information when listing or fetching files from the DBFS API type FileInfo struct { Path string `json:"path,omitempty"` IsDir bool `json:"is_dir,omitempty"` diff --git a/client/model/group.go b/client/model/group.go index 2ff303604..0b087fc17 100644 --- a/client/model/group.go +++ b/client/model/group.go @@ -1,23 +1,33 @@ package model +// GroupMember contains information about a member in a SCIM group type GroupMember struct { Display string `json:"display,omitempty"` Value string `json:"value,omitempty"` Ref string `json:"$ref,omitempty"` } +// ValueListItem is a struct that contains a field Value. +// This is for the SCIM API. type ValueListItem struct { Value string `json:"value,omitempty"` } +// GroupPathType describes the possible paths in the SCIM RFC for patch operations type GroupPathType string const ( + // GroupMembersPath is the members path for SCIM patch operations. GroupMembersPath GroupPathType = "members" + + // GroupRolesPath is the roles path for SCIM patch operations. GroupRolesPath GroupPathType = "roles" + + // GroupEntitlementsPath is the entitlements path for SCIM patch operations. GroupEntitlementsPath GroupPathType = "entitlements" ) +// Group contains information about the SCIM group type Group struct { ID string `json:"id,omitempty"` Schemas []URN `json:"schemas,omitempty"` @@ -30,6 +40,7 @@ type Group struct { InheritedRoles []RoleListItem `json:"inherited_roles,omitempty"` } +// GroupList contains a list of groups fetched from a list call to the SCIM API type GroupList struct { TotalResults int32 `json:"totalResults,omitempty"` StartIndex int32 `json:"startIndex,omitempty"` @@ -38,6 +49,7 @@ type GroupList struct { Resources []Group `json:"resources,omitempty"` } +// GroupPatchRequest contains a request structure to make a patch op against the SCIM API type GroupPatchRequest struct { Schemas []URN `json:"schemas,omitempty"` Operations []GroupPatchOperations `json:"Operations,omitempty"`
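A sketch of how these SCIM types compose when adding a member to a group; the PatchOp URN is the standard one from RFC 7644, written inline here since this patch does not show a package constant for it:

```go
// Build a SCIM patch request that adds one member to a group.
patch := model.GroupPatchRequest{
	Schemas: []model.URN{"urn:ietf:params:scim:api:messages:2.0:PatchOp"},
	Operations: []model.GroupPatchOperations{
		{
			Op:    "add",
			Path:  model.GroupMembersPath,
			Value: []model.ValueListItem{{Value: "user-id"}}, // member id is illustrative
		},
	},
}
```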
diff --git a/client/model/instance_pool.go b/client/model/instance_pool.go index 191c4d0f0..6b3cf916f 100644 --- a/client/model/instance_pool.go +++ b/client/model/instance_pool.go @@ -1,28 +1,32 @@ package model +// InstancePoolAwsAttributes contains AWS attributes for instance pools in AWS Databricks deployments type InstancePoolAwsAttributes struct { Availability AwsAvailability `json:"availability,omitempty"` ZoneID string `json:"zone_id,omitempty"` SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"` } +// InstancePoolDiskType contains disk type information for each of the different cloud service providers type InstancePoolDiskType struct { AzureDiskVolumeType string `json:"azure_disk_volume_type,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` } +// InstancePoolDiskSpec contains disk size, type and count information for the pool type InstancePoolDiskSpec struct { DiskType *InstancePoolDiskType `json:"disk_type,omitempty"` DiskCount int32 `json:"disk_count,omitempty"` DiskSize int32 `json:"disk_size,omitempty"` } +// InstancePool describes the instance pool object on Databricks type InstancePool struct { InstancePoolName string `json:"instance_pool_name,omitempty"` MinIdleInstances int32 `json:"min_idle_instances,omitempty"` MaxCapacity int32 `json:"max_capacity,omitempty"` AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` - NodeTypeId string `json:"node_type_id,omitempty"` + NodeTypeID string `json:"node_type_id,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes,omitempty"` EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` @@ -30,6 +34,7 @@ type InstancePool struct { PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` } +// InstancePoolStats contains the stats on a given pool type InstancePoolStats struct { UsedCount int32 `json:"used_count,omitempty"` IdleCount int32 `json:"idle_count,omitempty"` @@ -37,13 +42,14 @@ type InstancePoolStats struct { PendingIdleCount int32 `json:"pending_idle_count,omitempty"` } +// InstancePoolInfo encapsulates a response from the GET API for instance pools on Databricks type InstancePoolInfo struct { - InstancePoolId string `json:"instance_pool_id,omitempty"` + InstancePoolID string `json:"instance_pool_id,omitempty"` InstancePoolName string `json:"instance_pool_name,omitempty"` MinIdleInstances int32 `json:"min_idle_instances,omitempty"` MaxCapacity int32 `json:"max_capacity,omitempty"` AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` - NodeTypeId string `json:"node_type_id,omitempty"` + NodeTypeID string `json:"node_type_id,omitempty"` DefaultTags map[string]string `json:"default_tags,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes,omitempty"` diff --git a/client/model/instance_profile.go b/client/model/instance_profile.go index c17807d91..b4d91d03c 100644 --- a/client/model/instance_profile.go +++ b/client/model/instance_profile.go @@ -1,5 +1,6 @@ package model +// InstanceProfileInfo contains the ARN for AWS instance profiles type InstanceProfileInfo struct { InstanceProfileArn string `json:"instance_profile_arn,omitempty"` } diff --git a/client/model/job.go b/client/model/job.go index 755c40087..f9140dab9 100644 --- a/client/model/job.go +++ b/client/model/job.go @@ -1,27 +1,30 @@ package model -//go:generate easytags $GOFILE - +// NotebookTask contains the information for notebook jobs type NotebookTask struct { NotebookPath string `json:"notebook_path,omitempty"` BaseParameters map[string]string `json:"base_parameters,omitempty"` } +// SparkPythonTask contains the information for Python jobs type SparkPythonTask struct { PythonFile string `json:"python_file,omitempty"` Parameters []string `json:"parameters,omitempty"` } +// SparkJarTask contains the information for jar jobs type SparkJarTask struct { - JarUri string `json:"jar_uri,omitempty"` + JarURI string `json:"jar_uri,omitempty"` MainClassName string `json:"main_class_name,omitempty"` Parameters []string `json:"parameters,omitempty"` } +// SparkSubmitTask contains the information for spark-submit jobs type SparkSubmitTask struct { Parameters []string `json:"parameters,omitempty"` } +// JobEmailNotifications contains the information for email notifications after job completion type JobEmailNotifications struct { OnStart []string `json:"on_start,omitempty"` OnSuccess []string `json:"on_success,omitempty"` @@ -29,19 +32,21 @@ type JobEmailNotifications struct { NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` } +// CronSchedule contains the information for the quartz cron expression type CronSchedule struct { QuartzCronExpression string `json:"quartz_cron_expression,omitempty"` - TimezoneId string `json:"timezone_id,omitempty"` + TimezoneID string `json:"timezone_id,omitempty"` } +// JobSettings contains the information for configuring a job on Databricks type JobSettings struct { - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - NewCluster *Cluster `json:"new_cluster,omitempty"` - NotebookTask *NotebookTask `json:"notebook_task,omitempty"` - SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` - Name string `json:"name,omitempty"` + ExistingClusterID string `json:"existing_cluster_id,omitempty"` + NewCluster *Cluster `json:"new_cluster,omitempty"` + NotebookTask *NotebookTask `json:"notebook_task,omitempty"` + SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` + Name string `json:"name,omitempty"` Libraries []Library `json:"libraries,omitempty"` EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` @@ -52,8 +57,9 @@ type JobSettings struct { MaxConcurrentRuns int32 `json:"max_concurrent_runs,omitempty"` } +// Job contains the information when using a GET request from the Databricks Jobs API type Job struct { - JobId int64 `json:"job_id,omitempty"` + JobID int64 `json:"job_id,omitempty"` CreatorUserName string `json:"creator_user_name,omitempty"` Settings *JobSettings `json:"settings,omitempty"` CreatedTime int64 `json:"created_time,omitempty"`
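How the renamed job fields fit together, as a hedged sketch (values are illustrative, echoing the integration test later in this patch):

```go
// A job definition against an existing cluster, using the renamed
// ExistingClusterID and TimezoneID fields.
settings := model.JobSettings{
	ExistingClusterID: "my-cluster-id",
	NotebookTask: &model.NotebookTask{
		NotebookPath: "/Users/user@example.com/PythonExampleNotebook",
	},
	Schedule: &model.CronSchedule{
		QuartzCronExpression: "0 15 22 ? * *",
		TimezoneID:           "America/Los_Angeles",
	},
	MaxConcurrentRuns: 1,
}
```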
`json:"new_cluster,omitempty"` - NotebookTask *NotebookTask `json:"notebook_task,omitempty"` - SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` - Name string `json:"name,omitempty"` + ExistingClusterID string `json:"existing_cluster_id,omitempty"` + NewCluster *Cluster `json:"new_cluster,omitempty"` + NotebookTask *NotebookTask `json:"notebook_task,omitempty"` + SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` + Name string `json:"name,omitempty"` Libraries []Library `json:"libraries,omitempty"` EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` @@ -52,8 +57,9 @@ type JobSettings struct { MaxConcurrentRuns int32 `json:"max_concurrent_runs,omitempty"` } +// Job contains the information when using a GET request from the Databricks Jobs api type Job struct { - JobId int64 `json:"job_id,omitempty"` + JobID int64 `json:"job_id,omitempty"` CreatorUserName string `json:"creator_user_name,omitempty"` Settings *JobSettings `json:"settings,omitempty"` CreatedTime int64 `json:"created_time,omitempty"` diff --git a/client/model/library.go b/client/model/library.go index b3e458be0..4151f611b 100644 --- a/client/model/library.go +++ b/client/model/library.go @@ -1,21 +1,25 @@ package model +// PyPi is a python library hosted on PYPI type PyPi struct { Package string `json:"package,omitempty"` Repo string `json:"repo,omitempty"` } +// Maven is a jar library hosted on Maven type Maven struct { Coordinates string `json:"coordinates,omitempty"` Repo string `json:"repo,omitempty"` Exclusions []string `json:"exclusions,omitempty"` } +// Cran is a R library hosted on Maven type Cran struct { Package string `json:"package,omitempty"` Repo string `json:"repo,omitempty"` } +// Library is a construct that contains information of the location of the library and how to download it type Library struct { Jar string `json:"jar,omitempty"` Egg string `json:"egg,omitempty"` @@ -25,6 +29,7 @@ type Library struct { Cran *Cran `json:"cran,omitempty"` } +// LibraryStatus is the status on a given cluster when using the libraries status api type LibraryStatus struct { Library *Library `json:"library,omitempty"` Status string `json:"status,omitempty"` diff --git a/client/model/notebook.go b/client/model/notebook.go index 158d86431..f6ac24d81 100644 --- a/client/model/notebook.go +++ b/client/model/notebook.go @@ -1,18 +1,23 @@ package model -//go:generate easytags $GOFILE - +// Language is a custom type for langauge types in Databricks notebooks type Language string + +// ObjectType is a custom type for object types in Databricks workspaces type ObjectType string + +// ExportFormat is a custom type for formats in which you can export Databricks workspace components type ExportFormat string +// Different types of export formats available on Databricks const ( Source ExportFormat = "SOURCE" - Html ExportFormat = "HTML" + HTML ExportFormat = "HTML" Jupyter ExportFormat = "JUPYTER" DBC ExportFormat = "DBC" ) +// Different types of langauge formats available on Databricks const ( Scala Language = "SCALA" Python Language = "PYTHON" @@ -20,14 +25,16 @@ const ( R Language = "R" ) +// Different types of export formats available on Databricks 
const ( Notebook ObjectType = "NOTEBOOK" Directory ObjectType = "DIRECTORY" LibraryObject ObjectType = "LIBRARY" ) +// NotebookInfo contains information when doing a get request or list request on the workspace API type NotebookInfo struct { - ObjectId int64 `json:"object_id,omitempty"` + ObjectID int64 `json:"object_id,omitempty"` ObjectType ObjectType `json:"object_type,omitempty"` Path string `json:"path,omitempty"` Language Language `json:"language,omitempty"` diff --git a/client/model/scim.go b/client/model/scim.go index 3bbb9f9ef..6e8a131f5 100644 --- a/client/model/scim.go +++ b/client/model/scim.go @@ -1,7 +1,9 @@ package model +// URN is a custom type for the schema in the SCIM spec type URN string +// Possible schema URNs for the Databricks SCIM API const ( UserSchema URN = "urn:ietf:params:scim:schemas:core:2.0:User" WorkspaceUserSchema URN = "urn:ietf:params:scim:schemas:extension:workspace:2.0:User" @@ -9,28 +11,34 @@ GroupSchema URN = "urn:ietf:params:scim:schemas:core:2.0:Group" ) +// MembersValue is a list of value items for the members path type MembersValue struct { Members []ValueListItem `json:"members,omitempty"` } +// RolesValue is a list of value items for the roles path type RolesValue struct { Roles []ValueListItem `json:"roles,omitempty"` } +// ValueList is a generic list of value items for any path type ValueList struct { Value []ValueListItem `json:"value,omitempty"` } +// GroupsValue is a list of value items for the groups path type GroupsValue struct { Groups []ValueListItem `json:"groups,omitempty"` } +// GroupPatchOperations is a list of path operations for adding or removing group attributes type GroupPatchOperations struct { Op string `json:"op,omitempty"` Path GroupPathType `json:"path,omitempty"` Value []ValueListItem `json:"value,omitempty"` } +// UserPatchOperations is a list of path operations for adding or removing user attributes type UserPatchOperations struct { Op string `json:"op,omitempty"` Path string `json:"path,omitempty"` diff --git a/client/model/secret.go b/client/model/secret.go index b4207005a..e56bd5f39 100644 --- a/client/model/secret.go +++ b/client/model/secret.go @@ -1,34 +1,37 @@ package model +// ScopeBackendType is a custom type for the backend type for secret scopes type ScopeBackendType string +// List of constants of ScopeBackendType const ( ScopeBackendTypeDatabricks ScopeBackendType = "DATABRICKS" ) +// SecretScope is a struct that encapsulates the secret scope type SecretScope struct { Name string `json:"name,omitempty"` BackendType ScopeBackendType `json:"backend_type,omitempty"` } +// SecretMetadata is a struct that encapsulates the metadata for a secret object in a scope type SecretMetadata struct { Key string `json:"key,omitempty"` LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` } -type AclPermission string +// ACLPermission is a custom type for ACL permissions +type ACLPermission string +// List of possible ACL permissions on Databricks const ( - AclPermissionRead AclPermission = "READ" - AclPermissionWrite AclPermission = "WRITE" - AclPermissionManage AclPermission = "MANAGE" + ACLPermissionRead ACLPermission = "READ" + ACLPermissionWrite ACLPermission = "WRITE" + ACLPermissionManage ACLPermission = "MANAGE" ) -func ValidSecretAclPermissions() []AclPermission { - return []AclPermission{AclPermissionManage, AclPermissionRead, AclPermissionWrite} -} -
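ValidSecretAclPermissions is dropped in the rename; callers that relied on it can keep an equivalent local slice under the new constant names — a minimal sketch (the variable name is illustrative):

```go
// validACLPermissions mirrors the removed ValidSecretAclPermissions
// helper, using the renamed ACLPermission constants.
var validACLPermissions = []model.ACLPermission{
	model.ACLPermissionRead,
	model.ACLPermissionWrite,
	model.ACLPermissionManage,
}
```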
`json:"principal,omitempty"` - Permission AclPermission `json:"permission,omitempty"` + Permission ACLPermission `json:"permission,omitempty"` } diff --git a/client/model/token.go b/client/model/token.go index 6c7cde04f..72e7ade35 100644 --- a/client/model/token.go +++ b/client/model/token.go @@ -1,12 +1,12 @@ package model -//go:generate easytags $GOFILE - +// TokenResponse is a struct that contains information about token that is created from the create tokens api type TokenResponse struct { TokenValue string `json:"token_value,omitempty"` TokenInfo *TokenInfo `json:"token_info,omitempty"` } +// TokenInfo is a struct that contains metadata about a given token type TokenInfo struct { TokenID string `json:"token_id,omitempty"` CreationTime int64 `json:"creation_time,omitempty"` diff --git a/client/model/user.go b/client/model/user.go index 5960ea1fd..eef2d54f2 100644 --- a/client/model/user.go +++ b/client/model/user.go @@ -1,30 +1,37 @@ package model +// Entitlement is a custom type that contains a set of entitlements for a user/group type Entitlement string +// List of possible entitlement constants on Databricks const ( AllowClusterCreateEntitlement Entitlement = "allow-cluster-create" AllowInstancePoolCreateEntitlement Entitlement = "allow-instance-pool-create" ) +// GroupsListItem is a struct that contains a value of group id type GroupsListItem struct { Value string `json:"value,omitempty"` } +// EntitlementsListItem is a struct that contains a value of entitlement type EntitlementsListItem struct { Value Entitlement `json:"value,omitempty"` } +// RoleListItem is a struct that contains a value of role type RoleListItem struct { Value string `json:"value,omitempty"` } +// Email is a struct that contains information about a user's email type Email struct { Type interface{} `json:"type,omitempty"` Value string `json:"value,omitempty"` Primary interface{} `json:"primary,omitempty"` } +// User is a struct that contains all the information about a SCIM user type User struct { ID string `json:"id,omitempty"` Emails []Email `json:"emails,omitempty"` @@ -40,6 +47,7 @@ type User struct { InheritedRoles []RoleListItem `json:"inherited_roles,omitempty"` } +// UserPatchRequest is a struct that contains all the information for a PATCH request to the SCIM users api type UserPatchRequest struct { Schemas []URN `json:"schemas,omitempty"` Operations []UserPatchOperations `json:"Operations,omitempty"` diff --git a/client/service/clusters_test.go b/client/service/clusters_test.go index 2af4dfdc2..9f299fc39 100644 --- a/client/service/clusters_test.go +++ b/client/service/clusters_test.go @@ -25,7 +25,7 @@ func TestClustersAPI_Create(t *testing.T) { }`, responseStatus: http.StatusOK, args: args{ - ClusterId: "my-cluster-id", + ClusterID: "my-cluster-id", }, want: model.ClusterInfo{ClusterID: "my-cluster"}, wantErr: false, @@ -35,7 +35,7 @@ func TestClustersAPI_Create(t *testing.T) { response: "", responseStatus: http.StatusBadRequest, args: args{ - ClusterId: "my-cluster-id", + ClusterID: "my-cluster-id", }, want: model.ClusterInfo{}, wantErr: true, diff --git a/client/service/instance_pools_integration_test.go b/client/service/instance_pools_integration_test.go index f5e48bdaf..9c09c0b05 100644 --- a/client/service/instance_pools_integration_test.go +++ b/client/service/instance_pools_integration_test.go @@ -23,7 +23,7 @@ func TestInstancePools(t *testing.T) { DiskCount: 1, DiskSize: 32, }, - NodeTypeId: GetCloudInstanceType(client), + NodeTypeID: GetCloudInstanceType(client), 
IdleInstanceAutoTerminationMinutes: 20, PreloadedSparkVersions: []string{ "6.3.x-scala2.11", @@ -33,21 +33,21 @@ func TestInstancePools(t *testing.T) { assert.NoError(t, err, err) defer func() { - err := client.InstancePools().Delete(poolInfo.InstancePoolId) + err := client.InstancePools().Delete(poolInfo.InstancePoolID) assert.NoError(t, err, err) }() - poolReadInfo, err := client.InstancePools().Read(poolInfo.InstancePoolId) + poolReadInfo, err := client.InstancePools().Read(poolInfo.InstancePoolID) assert.NoError(t, err, err) - assert.Equal(t, poolInfo.InstancePoolId, poolReadInfo.InstancePoolId) + assert.Equal(t, poolInfo.InstancePoolID, poolReadInfo.InstancePoolID) assert.Equal(t, pool.InstancePoolName, poolReadInfo.InstancePoolName) assert.Equal(t, pool.MinIdleInstances, poolReadInfo.MinIdleInstances) assert.Equal(t, pool.MaxCapacity, poolReadInfo.MaxCapacity) - assert.Equal(t, pool.NodeTypeId, poolReadInfo.NodeTypeId) + assert.Equal(t, pool.NodeTypeID, poolReadInfo.NodeTypeID) assert.Equal(t, pool.IdleInstanceAutoTerminationMinutes, poolReadInfo.IdleInstanceAutoTerminationMinutes) err = client.InstancePools().Update(model.InstancePoolInfo{ - InstancePoolId: poolReadInfo.InstancePoolId, + InstancePoolID: poolReadInfo.InstancePoolID, InstancePoolName: "my_instance_pool", MinIdleInstances: 0, MaxCapacity: 20, @@ -58,7 +58,7 @@ func TestInstancePools(t *testing.T) { DiskCount: 1, DiskSize: 32, }, - NodeTypeId: GetCloudInstanceType(client), + NodeTypeID: GetCloudInstanceType(client), IdleInstanceAutoTerminationMinutes: 20, PreloadedSparkVersions: []string{ "6.3.x-scala2.11", @@ -66,7 +66,7 @@ func TestInstancePools(t *testing.T) { }) assert.NoError(t, err, err) - poolReadInfo, err = client.InstancePools().Read(poolInfo.InstancePoolId) + poolReadInfo, err = client.InstancePools().Read(poolInfo.InstancePoolID) assert.NoError(t, err, err) assert.Equal(t, poolReadInfo.MaxCapacity, int32(20)) diff --git a/client/service/instance_pools_test.go b/client/service/instance_pools_test.go index eb1d26808..5b434462b 100644 --- a/client/service/instance_pools_test.go +++ b/client/service/instance_pools_test.go @@ -29,7 +29,7 @@ func TestInstancePoolsAPI_Create(t *testing.T) { InstancePoolName: "", MinIdleInstances: 0, MaxCapacity: 10, - NodeTypeId: "Standard_DS3_v2", + NodeTypeID: "Standard_DS3_v2", IdleInstanceAutoTerminationMinutes: 60, EnableElasticDisk: false, DiskSpec: &model.InstancePoolDiskSpec{ @@ -42,7 +42,7 @@ func TestInstancePoolsAPI_Create(t *testing.T) { }, }, want: model.InstancePoolInfo{ - InstancePoolId: "0101-120000-brick1-pool-ABCD1234", + InstancePoolID: "0101-120000-brick1-pool-ABCD1234", }, wantErr: false, }, @@ -101,10 +101,10 @@ func TestInstancePoolsAPI_Update(t *testing.T) { response: "", args: args{ InstancePoolInfo: &model.InstancePoolInfo{ - InstancePoolId: "0101-120000-brick1-pool-ABCD1234", + InstancePoolID: "0101-120000-brick1-pool-ABCD1234", MinIdleInstances: 0, MaxCapacity: 10, - NodeTypeId: "Standard_DS3_v2", + NodeTypeID: "Standard_DS3_v2", IdleInstanceAutoTerminationMinutes: 60, EnableElasticDisk: false, DiskSpec: &model.InstancePoolDiskSpec{ @@ -182,7 +182,7 @@ func TestInstancePoolsAPI_Read(t *testing.T) { InstancePoolId: "101-120000-brick1-pool-ABCD1234", }, want: model.InstancePoolInfo{ - InstancePoolId: "101-120000-brick1-pool-ABCD1234", + InstancePoolID: "101-120000-brick1-pool-ABCD1234", InstancePoolName: "mypool", MinIdleInstances: 0, AwsAttributes: &model.InstancePoolAwsAttributes{ @@ -190,7 +190,7 @@ func TestInstancePoolsAPI_Read(t *testing.T) { ZoneID: 
"us-west-2a", SpotBidPricePercent: 100, }, - NodeTypeId: "c4.2xlarge", + NodeTypeID: "c4.2xlarge", IdleInstanceAutoTerminationMinutes: 60, EnableElasticDisk: false, DiskSpec: &model.InstancePoolDiskSpec{ diff --git a/client/service/jobs_integration_test.go b/client/service/jobs_integration_test.go index 573bbd9df..a50f96886 100644 --- a/client/service/jobs_integration_test.go +++ b/client/service/jobs_integration_test.go @@ -43,14 +43,14 @@ func TestJobsCreate(t *testing.T) { MaxRetries: 1, Schedule: &model.CronSchedule{ QuartzCronExpression: "0 15 22 ? * *", - TimezoneId: "America/Los_Angeles", + TimezoneID: "America/Los_Angeles", }, MaxConcurrentRuns: 1, } job, err := client.Jobs().Create(jobSettings) assert.NoError(t, err, err) - id := job.JobId + id := job.JobID defer func() { err := client.Jobs().Delete(id) assert.NoError(t, err, err) diff --git a/client/service/jobs_test.go b/client/service/jobs_test.go index 2cb85799e..52ca07f82 100644 --- a/client/service/jobs_test.go +++ b/client/service/jobs_test.go @@ -24,10 +24,10 @@ func TestJobsAPI_Create(t *testing.T) { }`, responseStatus: http.StatusOK, args: args{ - ExistingClusterId: "my-cluster-id", + ExistingClusterID: "my-cluster-id", }, want: model.Job{ - JobId: 1, + JobID: 1, }, wantErr: false, }, @@ -36,7 +36,7 @@ func TestJobsAPI_Create(t *testing.T) { response: "", responseStatus: http.StatusBadRequest, args: args{ - ExistingClusterId: "my-cluster-id", + ExistingClusterID: "my-cluster-id", }, want: model.Job{}, wantErr: true, @@ -202,7 +202,7 @@ func TestJobsAPI_Read(t *testing.T) { }, wantUri: "/api/2.0/jobs/get?job_id=1", want: model.Job{ - JobId: 1, + JobID: 1, Settings: &model.JobSettings{ NewCluster: &model.Cluster{ NumWorkers: 10, @@ -235,7 +235,7 @@ func TestJobsAPI_Read(t *testing.T) { MaxRetries: 1, Schedule: &model.CronSchedule{ QuartzCronExpression: "0 15 22 ? 
* *", - TimezoneId: "America/Los_Angeles", + TimezoneID: "America/Los_Angeles", }, }, CreatedTime: 1457570074236, diff --git a/client/service/notebooks_test.go b/client/service/notebooks_test.go index ba498ceaa..591d40e66 100644 --- a/client/service/notebooks_test.go +++ b/client/service/notebooks_test.go @@ -147,12 +147,12 @@ func TestNotebooksAPI_ListNonRecursive(t *testing.T) { wantUri: "/api/2.0/workspace/list?path=%2Ftest%2Fpath", want: []model.NotebookInfo{ { - ObjectId: 123, + ObjectID: 123, ObjectType: model.Directory, Path: "/Users/user@example.com/project", }, { - ObjectId: 456, + ObjectID: 456, ObjectType: model.Notebook, Language: model.Python, Path: "/Users/user@example.com/PythonExampleNotebook", @@ -223,13 +223,13 @@ func TestNotebooksAPI_ListRecursive(t *testing.T) { wantUri: []string{"/api/2.0/workspace/list?path=%2Ftest%2Fpath", "/api/2.0/workspace/list?path=%2FUsers%2Fuser%40example.com%2Fproject"}, want: []model.NotebookInfo{ { - ObjectId: 457, + ObjectID: 457, ObjectType: model.Notebook, Language: model.Python, Path: "/Users/user@example.com/Notebook2", }, { - ObjectId: 456, + ObjectID: 456, ObjectType: model.Notebook, Language: model.Python, Path: "/Users/user@example.com/PythonExampleNotebook", @@ -303,7 +303,7 @@ func TestNotebooksAPI_Read(t *testing.T) { }, responseStatus: http.StatusOK, want: model.NotebookInfo{ - ObjectId: 789, + ObjectID: 789, ObjectType: model.Notebook, Path: "/Users/user@example.com/project/ScalaExampleNotebook", Language: model.Scala, diff --git a/client/service/secret_acls.go b/client/service/secret_acls.go index 6cae5e4f1..49d8fe4e1 100644 --- a/client/service/secret_acls.go +++ b/client/service/secret_acls.go @@ -12,11 +12,11 @@ type SecretAclsAPI struct { } // Create creates or overwrites the ACL associated with the given principal (user or group) on the specified scope point -func (a SecretAclsAPI) Create(scope string, principal string, permission model.AclPermission) error { +func (a SecretAclsAPI) Create(scope string, principal string, permission model.ACLPermission) error { data := struct { Scope string `json:"scope,omitempty"` Principal string `json:"principal,omitempty"` - Permission model.AclPermission `json:"permission,omitempty"` + Permission model.ACLPermission `json:"permission,omitempty"` }{ scope, principal, @@ -40,8 +40,8 @@ func (a SecretAclsAPI) Delete(scope string, principal string) error { } // Read describe the details about the given ACL, such as the group and permission -func (a SecretAclsAPI) Read(scope string, principal string) (model.AclItem, error) { - var aclItem model.AclItem +func (a SecretAclsAPI) Read(scope string, principal string) (model.ACLItem, error) { + var aclItem model.ACLItem data := struct { Scope string `json:"scope,omitempty" url:"scope,omitempty"` @@ -60,9 +60,9 @@ func (a SecretAclsAPI) Read(scope string, principal string) (model.AclItem, erro } // List lists the ACLs set on the given scope -func (a SecretAclsAPI) List(scope string) ([]model.AclItem, error) { +func (a SecretAclsAPI) List(scope string) ([]model.ACLItem, error) { var aclItem struct { - Items []model.AclItem `json:"items,omitempty"` + Items []model.ACLItem `json:"items,omitempty"` } data := struct { diff --git a/client/service/secret_acls_test.go b/client/service/secret_acls_test.go index 76e5d8e2f..3a1c7c74e 100644 --- a/client/service/secret_acls_test.go +++ b/client/service/secret_acls_test.go @@ -12,7 +12,7 @@ func TestSecretAclsAPI_Create(t *testing.T) { type args struct { Scope string `json:"scope"` Principal string 
`json:"principal"` - Permission model.AclPermission `json:"permission"` + Permission model.ACLPermission `json:"permission"` } tests := []struct { name string @@ -26,7 +26,7 @@ func TestSecretAclsAPI_Create(t *testing.T) { args: args{ Scope: "my-scope", Principal: "my-principal", - Permission: model.AclPermissionManage, + Permission: model.ACLPermissionManage, }, wantErr: false, }, @@ -80,7 +80,7 @@ func TestSecretAclsAPI_List(t *testing.T) { name string response string args args - want []model.AclItem + want []model.ACLItem wantErr bool }{ { @@ -99,14 +99,14 @@ func TestSecretAclsAPI_List(t *testing.T) { args: args{ Scope: "my-scope", }, - want: []model.AclItem{ + want: []model.ACLItem{ { Principal: "admins", - Permission: model.AclPermissionManage, + Permission: model.ACLPermissionManage, }, { Principal: "data-scientists", - Permission: model.AclPermissionRead, + Permission: model.ACLPermissionRead, }, }, wantErr: false, @@ -131,7 +131,7 @@ func TestSecretAclsAPI_Read(t *testing.T) { name string response string args args - want model.AclItem + want model.ACLItem wantErr bool }{ { @@ -144,9 +144,9 @@ func TestSecretAclsAPI_Read(t *testing.T) { Scope: "my-scope", Principal: "my-principal", }, - want: model.AclItem{ + want: model.ACLItem{ Principal: "data-scientists", - Permission: model.AclPermissionRead, + Permission: model.ACLPermissionRead, }, wantErr: false, }, diff --git a/client/service/secrets_scopes_acls_integration_test.go b/client/service/secrets_scopes_acls_integration_test.go index 989bc49c3..8bec5bb57 100644 --- a/client/service/secrets_scopes_acls_integration_test.go +++ b/client/service/secrets_scopes_acls_integration_test.go @@ -47,7 +47,7 @@ func TestSecretsScopesAclsIntegration(t *testing.T) { assert.NoError(t, err, err) assert.Equal(t, testKey, secret.Key, "Secret lookup does not yield same key") - err = client.SecretAcls().Create(testScope, testPrincipal, model.AclPermissionManage) + err = client.SecretAcls().Create(testScope, testPrincipal, model.ACLPermissionManage) assert.NoError(t, err, err) secretAcls, err := client.SecretAcls().List(testScope) @@ -57,7 +57,7 @@ func TestSecretsScopesAclsIntegration(t *testing.T) { secretAcl, err := client.SecretAcls().Read(testScope, testPrincipal) assert.NoError(t, err, err) assert.Equal(t, testPrincipal, secretAcl.Principal, "Secret lookup does not yield same key") - assert.Equal(t, model.AclPermissionManage, secretAcl.Permission, "Secret lookup does not yield same key") + assert.Equal(t, model.ACLPermissionManage, secretAcl.Permission, "Secret lookup does not yield same key") err = client.Secrets().Delete(testScope, testKey) assert.NoError(t, err, err) diff --git a/databricks/azure_ws_init_test.go b/databricks/azure_ws_init_test.go index 50a0150f4..9ebcd2713 100644 --- a/databricks/azure_ws_init_test.go +++ b/databricks/azure_ws_init_test.go @@ -47,14 +47,14 @@ func TestAzureAuthCreateApiToken(t *testing.T) { InstancePoolName: "my_instance_pool", MinIdleInstances: 0, MaxCapacity: 10, - NodeTypeId: "Standard_DS3_v2", + NodeTypeID: "Standard_DS3_v2", IdleInstanceAutoTerminationMinutes: 20, PreloadedSparkVersions: []string{ "6.3.x-scala2.11", }, }) defer func() { - err := api.InstancePools().Delete(instancePoolInfo.InstancePoolId) + err := api.InstancePools().Delete(instancePoolInfo.InstancePoolID) assert.NoError(t, err, err) }() diff --git a/databricks/data_source_databricks_notebook.go b/databricks/data_source_databricks_notebook.go index 167f624b4..848333d82 100644 --- a/databricks/data_source_databricks_notebook.go +++ 
b/databricks/data_source_databricks_notebook.go @@ -24,7 +24,7 @@ func dataSourceNotebook() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{ string(model.DBC), string(model.Source), - string(model.Html), + string(model.HTML), }, false), }, "content": &schema.Schema{ @@ -75,7 +75,7 @@ func dataSourceNotebookRead(d *schema.ResourceData, m interface{}) error { if err != nil { return err } - err = d.Set("object_id", int(notebookInfo.ObjectId)) + err = d.Set("object_id", int(notebookInfo.ObjectID)) if err != nil { return err } diff --git a/databricks/resource_databricks_cluster.go b/databricks/resource_databricks_cluster.go index fc4fd9b89..7811694af 100644 --- a/databricks/resource_databricks_cluster.go +++ b/databricks/resource_databricks_cluster.go @@ -710,7 +710,7 @@ func resourceClusterRead(d *schema.ResourceData, m interface{}) error { if clusterInfo.DockerImage != nil { dockerImage := map[string]string{} - dockerImage["url"] = clusterInfo.DockerImage.Url + dockerImage["url"] = clusterInfo.DockerImage.URL if clusterInfo.DockerImage.BasicAuth != nil { dockerImage["username"] = clusterInfo.DockerImage.BasicAuth.Username dockerImage["password"] = clusterInfo.DockerImage.BasicAuth.Password @@ -745,7 +745,7 @@ func resourceClusterRead(d *schema.ResourceData, m interface{}) error { } if _, ok := d.GetOk("instance_pool_id"); ok { - err := d.Set("instance_pool_id", clusterInfo.InstancePoolId) + err := d.Set("instance_pool_id", clusterInfo.InstancePoolID) if err != nil { return err } @@ -949,7 +949,7 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { if model.ContainsClusterState([]model.ClusterState{model.ClusterState(model.ClusterStateTerminated)}, clusterState) { cluster := parseSchemaToCluster(d, "") - cluster.ClusterId = id + cluster.ClusterID = id err := client.Clusters().Edit(cluster) if err != nil { return err @@ -986,7 +986,7 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { return resourceClusterRead(d, m) } else if model.ContainsClusterState([]model.ClusterState{model.ClusterState(model.ClusterStateRunning)}, clusterState) { cluster := parseSchemaToCluster(d, "") - cluster.ClusterId = id + cluster.ClusterID = id if len(installs) > 0 { err = client.Libraries().Create(id, installs) @@ -1020,7 +1020,7 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { return err } cluster := parseSchemaToCluster(d, "") - cluster.ClusterId = id + cluster.ClusterID = id if len(installs) > 0 { err = client.Libraries().Create(id, installs) @@ -1239,7 +1239,7 @@ func parseSchemaToCluster(d *schema.ResourceData, schemaAttPrefix string) model. if dockerImageSet, ok := d.GetOk(schemaAttPrefix + "docker_image"); ok { dockerImageConf := getMapFromOneItemSet(dockerImageSet) if url, ok := dockerImageConf["url"]; ok { - dockerImageData.Url = url.(string) + dockerImageData.URL = url.(string) } dockerAuthData := model.DockerBasicAuth{} username, userOk := dockerImageConf["username"] @@ -1269,7 +1269,7 @@ func parseSchemaToCluster(d *schema.ResourceData, schemaAttPrefix string) model. 
//Deal with instance pool id if instancePoolID, ok := d.GetOk(schemaAttPrefix + "instance_pool_id"); ok { - cluster.InstancePoolId = instancePoolID.(string) + cluster.InstancePoolID = instancePoolID.(string) } //Deal with idempotency token diff --git a/databricks/resource_databricks_instance_pool.go b/databricks/resource_databricks_instance_pool.go index 709f7871a..cac26a0fb 100644 --- a/databricks/resource_databricks_instance_pool.go +++ b/databricks/resource_databricks_instance_pool.go @@ -174,7 +174,7 @@ func resourceInstancePoolCreate(d *schema.ResourceData, m interface{}) error { } if nodeTypeId, ok := d.GetOk("node_type_id"); ok { - instancePool.NodeTypeId = nodeTypeId.(string) + instancePool.NodeTypeID = nodeTypeId.(string) } if customTags, ok := d.GetOk("custom_tags"); ok { @@ -221,7 +221,7 @@ func resourceInstancePoolCreate(d *schema.ResourceData, m interface{}) error { if err != nil { return err } - d.SetId(instancePoolInfo.InstancePoolId) + d.SetId(instancePoolInfo.InstancePoolID) return resourceInstancePoolRead(d, m) } @@ -270,7 +270,7 @@ func resourceInstancePoolRead(d *schema.ResourceData, m interface{}) error { } } - err = d.Set("node_type_id", instancePoolInfo.NodeTypeId) + err = d.Set("node_type_id", instancePoolInfo.NodeTypeID) if err != nil { return err } @@ -328,8 +328,8 @@ func resourceInstancePoolUpdate(d *schema.ResourceData, m interface{}) error { instancePoolInfo.MinIdleInstances = int32(d.Get("min_idle_instances").(int)) instancePoolInfo.MaxCapacity = int32(d.Get("max_capacity").(int)) instancePoolInfo.IdleInstanceAutoTerminationMinutes = int32(d.Get("idle_instance_autotermination_minutes").(int)) - instancePoolInfo.InstancePoolId = id - instancePoolInfo.NodeTypeId = d.Get("node_type_id").(string) + instancePoolInfo.InstancePoolID = id + instancePoolInfo.NodeTypeID = d.Get("node_type_id").(string) err := client.InstancePools().Update(instancePoolInfo) if err != nil { diff --git a/databricks/resource_databricks_job.go b/databricks/resource_databricks_job.go index 6f315208d..bd0f81416 100644 --- a/databricks/resource_databricks_job.go +++ b/databricks/resource_databricks_job.go @@ -454,8 +454,8 @@ func resourceJobCreate(d *schema.ResourceData, m interface{}) error { if err != nil { return err } - log.Println(job.JobId) - d.SetId(strconv.Itoa(int(job.JobId))) + log.Println(job.JobID) + d.SetId(strconv.Itoa(int(job.JobID))) return resourceJobRead(d, m) } @@ -478,7 +478,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { } if _, ok := d.GetOk("existing_cluster_id"); ok { - err := d.Set("existing_cluster_id", job.Settings.ExistingClusterId) + err := d.Set("existing_cluster_id", job.Settings.ExistingClusterID) if err != nil { return err } @@ -564,7 +564,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { newClusterSettings["init_scripts"] = listOfInitScripts dockerImage := map[string]string{} - dockerImage["url"] = job.Settings.NewCluster.DockerImage.Url + dockerImage["url"] = job.Settings.NewCluster.DockerImage.URL if job.Settings.NewCluster.DockerImage.BasicAuth != nil { dockerImage["username"] = job.Settings.NewCluster.DockerImage.BasicAuth.Username dockerImage["password"] = job.Settings.NewCluster.DockerImage.BasicAuth.Password @@ -578,7 +578,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { newClusterSettings["enable_elastic_disk"] = job.Settings.NewCluster.EnableElasticDisk - newClusterSettings["instance_pool_id"] = job.Settings.NewCluster.InstancePoolId + newClusterSettings["instance_pool_id"] = 
job.Settings.NewCluster.InstancePoolID } libraries := job.Settings.Libraries @@ -686,7 +686,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { } if job.Settings.SparkJarTask != nil { - err = d.Set("jar_uri", job.Settings.SparkJarTask.JarUri) + err = d.Set("jar_uri", job.Settings.SparkJarTask.JarURI) if err != nil { return err } @@ -801,7 +801,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { sched := map[string]string{} sched["quartz_cron_expression"] = job.Settings.Schedule.QuartzCronExpression - sched["timezone_id"] = job.Settings.Schedule.TimezoneId + sched["timezone_id"] = job.Settings.Schedule.TimezoneID schedSet := []map[string]string{sched} err = d.Set("schedule", schedSet) @@ -820,7 +820,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { return err } - err = d.Set("job_id", job.JobId) + err = d.Set("job_id", job.JobID) if err != nil { return err } @@ -870,7 +870,7 @@ func parseSchemaToJobSettings(d *schema.ResourceData) model.JobSettings { var jobSettings model.JobSettings if existingClusterId, ok := d.GetOk("existing_cluster_id"); ok { - jobSettings.ExistingClusterId = existingClusterId.(string) + jobSettings.ExistingClusterID = existingClusterId.(string) } cluster := parseSchemaToCluster(d, "new_cluster.0.") @@ -946,7 +946,7 @@ func parseSchemaToJobSettings(d *schema.ResourceData) model.JobSettings { scheduleMap := getMapFromOneItemSet(schedule) jobSettings.Schedule = &model.CronSchedule{ QuartzCronExpression: scheduleMap["quartz_cron_expression"].(string), - TimezoneId: scheduleMap["timezone_id"].(string), + TimezoneID: scheduleMap["timezone_id"].(string), } } @@ -972,7 +972,7 @@ func parseSchemaToNotebookTask(d *schema.ResourceData) *model.NotebookTask { func parseSchemaToSparkJarTask(d *schema.ResourceData) *model.SparkJarTask { var sparkJarTask model.SparkJarTask if uri, ok := d.GetOk("jar_uri"); ok { - sparkJarTask.JarUri = uri.(string) + sparkJarTask.JarURI = uri.(string) } if cName, ok := d.GetOk("jar_main_class_name"); ok { sparkJarTask.MainClassName = cName.(string) diff --git a/databricks/resource_databricks_notebook.go b/databricks/resource_databricks_notebook.go index 6d2a98569..94d69f8ed 100644 --- a/databricks/resource_databricks_notebook.go +++ b/databricks/resource_databricks_notebook.go @@ -76,7 +76,7 @@ func resourceNotebook() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{ string(model.DBC), string(model.Source), - string(model.Html), + string(model.HTML), }, false), }, "object_type": &schema.Schema{ @@ -166,7 +166,7 @@ func resourceNotebookRead(d *schema.ResourceData, m interface{}) error { if err != nil { return err } - err = d.Set("object_id", int(notebookInfo.ObjectId)) + err = d.Set("object_id", int(notebookInfo.ObjectID)) if err != nil { return err } diff --git a/databricks/resource_databricks_secret_acl.go b/databricks/resource_databricks_secret_acl.go index 1abedcd2b..4a7530d4d 100644 --- a/databricks/resource_databricks_secret_acl.go +++ b/databricks/resource_databricks_secret_acl.go @@ -49,7 +49,7 @@ func resourceSecretAclCreate(d *schema.ResourceData, m interface{}) error { client := m.(service.DBApiClient) scopeName := d.Get("scope").(string) principal := d.Get("principal").(string) - permission := model.AclPermission(d.Get("permission").(string)) + permission := model.ACLPermission(d.Get("permission").(string)) err := client.SecretAcls().Create(scopeName, principal, permission) if err != nil { return err diff --git 
a/databricks/resource_databricks_secret_acl_test.go b/databricks/resource_databricks_secret_acl_test.go index 5e43d2511..991c41158 100644 --- a/databricks/resource_databricks_secret_acl_test.go +++ b/databricks/resource_databricks_secret_acl_test.go @@ -13,7 +13,7 @@ import ( func TestAccSecretAclResource(t *testing.T) { //var secretScope model.Secre - var secretAcl model.AclItem + var secretAcl model.ACLItem // generate a random name for each tokenInfo test run, to avoid // collisions from multiple concurrent tests. // the acctest package includes many helpers such as RandStringFromCharSet @@ -66,16 +66,16 @@ func testSecretAclPreCheck(t *testing.T) { return } -func testSecretAclValues(t *testing.T, acl *model.AclItem, permission, principal string) resource.TestCheckFunc { +func testSecretAclValues(t *testing.T, acl *model.ACLItem, permission, principal string) resource.TestCheckFunc { return func(s *terraform.State) error { - assert.True(t, acl.Permission == model.AclPermissionRead) + assert.True(t, acl.Permission == model.ACLPermissionRead) assert.True(t, acl.Principal == principal) return nil } } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. -func testSecretAclResourceExists(n string, aclItem *model.AclItem, t *testing.T) resource.TestCheckFunc { +func testSecretAclResourceExists(n string, aclItem *model.ACLItem, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n]