Fixed & refactored linting issues in client/model package
stikkireddy committed Apr 20, 2020
1 parent c91b8f7 commit 0bbf3e4
Showing 30 changed files with 244 additions and 174 deletions.
131 changes: 74 additions & 57 deletions client/model/cluster.go
@@ -1,82 +1,84 @@
package model

import "errors"

// AutoScale is a struct that describes autoscaling for clusters
type AutoScale struct {
MinWorkers int32 `json:"min_workers,omitempty"`
MaxWorkers int32 `json:"max_workers,omitempty"`
}

// AwsAvailability is a type for describing AWS availability on cluster nodes
type AwsAvailability string

const (
// AwsAvailabilitySpot is the spot instance type for clusters
AwsAvailabilitySpot = "SPOT"
// AwsAvailabilityOnDemand is the on-demand instance type for clusters
AwsAvailabilityOnDemand = "ON_DEMAND"
// AwsAvailabilitySpotWithFallback is the spot instance type for clusters, with the option
// to fall back to on-demand if a spot instance cannot be acquired
AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK"
)

// GetAwsAvailability returns the AwsAvailability constant matching the given string, or an error if there is no match
func GetAwsAvailability(val string) (AwsAvailability, error) {
switch val {
case "SPOT":
return AwsAvailabilitySpot, nil
case "ON_DEMAND":
return AwsAvailabilityOnDemand, nil
case "SPOT_WITH_FALLBACK":
return AwsAvailabilitySpotWithFallback, nil
}
return "", errors.New("No Match!")
}

// AzureDiskVolumeType is the disk type on Azure VMs
type AzureDiskVolumeType string

const (
// AzureDiskVolumeTypeStandard is for standard locally redundant storage (LRS)
AzureDiskVolumeTypeStandard = "STANDARD_LRS"
// AzureDiskVolumeTypePremium is for premium locally redundant storage (LRS)
AzureDiskVolumeTypePremium = "PREMIUM_LRS"
)

// GetAzureDiskVolumeType returns the AzureDiskVolumeType constant matching the given string, or an error if there is no match
func GetAzureDiskVolumeType(val string) (AzureDiskVolumeType, error) {
switch val {
case "STANDARD_LRS":
return AzureDiskVolumeTypeStandard, nil
case "PREMIUM_LRS":
return AzureDiskVolumeTypePremium, nil
}
return "", errors.New("No Match!")
}

// EbsVolumeType is the EBS volume type on AWS VMs
type EbsVolumeType string

const (
// EbsVolumeTypeGeneralPurposeSsd is general purpose SSD (starts at 32 GB)
EbsVolumeTypeGeneralPurposeSsd = "GENERAL_PURPOSE_SSD"
// EbsVolumeTypeThroughputOptimizedHdd is throughput optimized HDD (starts at 500 GB)
EbsVolumeTypeThroughputOptimizedHdd = "THROUGHPUT_OPTIMIZED_HDD"
)

// GetEbsVolumeType returns the EbsVolumeType constant matching the given string, or an error if there is no match
func GetEbsVolumeType(val string) (EbsVolumeType, error) {
switch val {
case "GENERAL_PURPOSE_SSD":
return EbsVolumeTypeGeneralPurposeSsd, nil
case "THROUGHPUT_OPTIMIZED_HDD":
return EbsVolumeTypeThroughputOptimizedHdd, nil
}
return "", errors.New("No Match!")
}
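
To illustrate the pattern these lookup helpers share, here is a minimal usage sketch. The import path is an assumption based on the repository layout, and the input value is arbitrary; adjust both to your setup.

package main

import (
    "fmt"
    "log"

    // Assumed import path for the client/model package shown in this diff.
    "github.com/databrickslabs/databricks-terraform/client/model"
)

func main() {
    // Convert a raw configuration string into the typed constant.
    availability, err := model.GetAwsAvailability("SPOT_WITH_FALLBACK")
    if err != nil {
        // Unrecognized values surface as an error rather than an invalid constant.
        log.Fatalf("invalid availability: %v", err)
    }
    fmt.Println(availability) // SPOT_WITH_FALLBACK
}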

// ClusterState is for describing possible cluster states
type ClusterState string

const (
// ClusterStatePending is for PENDING state
ClusterStatePending = "PENDING"

// ClusterStateRunning is for RUNNING state
ClusterStateRunning = "RUNNING"

// ClusterStateRestarting is for RESTARTING state
ClusterStateRestarting = "RESTARTING"

// ClusterStateResizing is for RESIZING state
ClusterStateResizing = "RESIZING"

// ClusterStateTerminating is for TERMINATING state
ClusterStateTerminating = "TERMINATING"

// ClusterStateTerminated is for TERMINATED state
ClusterStateTerminated = "TERMINATED"

// ClusterStateError is for ERROR state
ClusterStateError = "ERROR"

// ClusterStateUnknown is for UNKNOWN state
ClusterStateUnknown = "UNKNOWN"
)

// ClusterStateNonRunnable is a list of states from which the cluster cannot return to a running state on its own
// without user intervention
var ClusterStateNonRunnable = []ClusterState{ClusterStateTerminating, ClusterStateTerminated, ClusterStateError, ClusterStateUnknown}

// ClusterStateNonTerminating is a list of states from which the cluster cannot reach a terminated state on its own
// without user intervention
var ClusterStateNonTerminating = []ClusterState{ClusterStatePending, ClusterStateRunning, ClusterStateRestarting, ClusterStateResizing, ClusterStateUnknown}

// ContainsClusterState returns true if the given search state is present in the given set of cluster states.
func ContainsClusterState(clusterStates []ClusterState, searchState ClusterState) bool {
for _, state := range clusterStates {
if state == searchState {
@@ -86,11 +88,13 @@ func ContainsClusterState(clusterStates []ClusterState, searchState ClusterState
return false
}
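
As a sketch of how ContainsClusterState and the state lists above might be combined, again assuming the same import path as in the earlier example:

package main

import (
    "fmt"

    // Assumed import path; adjust to the module's real path.
    "github.com/databrickslabs/databricks-terraform/client/model"
)

// needsUserIntervention is a hypothetical helper: a cluster in a non-runnable
// state will not return to RUNNING on its own.
func needsUserIntervention(state model.ClusterState) bool {
    return model.ContainsClusterState(model.ClusterStateNonRunnable, state)
}

func main() {
    fmt.Println(needsUserIntervention(model.ClusterStateTerminated)) // true
    fmt.Println(needsUserIntervention(model.ClusterStateRunning))    // false
}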

// ZonesInfo encapsulates the zone information from the zones API call
type ZonesInfo struct {
Zones []string `json:"zones,omitempty"`
DefaultZone string `json:"default_zone,omitempty"`
}

// AwsAttributes encapsulates the AWS attributes for AWS-based clusters
type AwsAttributes struct {
FirstOnDemand int32 `json:"first_on_demand,omitempty"`
Availability AwsAvailability `json:"availability,omitempty"`
@@ -102,10 +106,12 @@ type AwsAttributes struct {
EbsVolumeSize int32 `json:"ebs_volume_size,omitempty"`
}

// DbfsStorageInfo contains the destination string for DBFS
type DbfsStorageInfo struct {
Destination string `json:"destination,omitempty"`
}

// S3StorageInfo describes the S3 destination and options used when storing files in S3
type S3StorageInfo struct {
Destination string `json:"destination,omitempty"`
Region string `json:"region,omitempty"`
@@ -116,15 +122,18 @@ type S3StorageInfo struct {
CannedACL string `json:"canned_acl,omitempty"`
}

// StorageInfo holds either a DBFS or an S3 storage destination, whichever is relevant.
type StorageInfo struct {
Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
S3 *S3StorageInfo `json:"s3,omitempty"`
}

// SparkNodeAwsAttributes is the struct that indicates whether the node is a spot instance
type SparkNodeAwsAttributes struct {
IsSpot bool `json:"is_spot,omitempty"`
}

// SparkNode encapsulates all the attributes of a node that is part of a Databricks cluster
type SparkNode struct {
PrivateIP string `json:"private_ip,omitempty"`
PublicDNS string `json:"public_dns,omitempty"`
@@ -135,22 +144,26 @@ type SparkNode struct {
HostPrivateIP string `json:"host_private_ip,omitempty"`
}

// TerminationReason encapsulates the termination code and potential parameters
type TerminationReason struct {
Code string `json:"code,omitempty"`
Parameters map[string]string `json:"parameters,omitempty"`
}

// LogSyncStatus encapsulates when the cluster logs were last delivered.
type LogSyncStatus struct {
LastAttempted int64 `json:"last_attempted,omitempty"`
LastException string `json:"last_exception,omitempty"`
}

// ClusterCloudProviderNodeInfo encapsulates the existing quota available from the cloud service provider.
type ClusterCloudProviderNodeInfo struct {
Status []string `json:"status,omitempty"`
AvailableCoreQuota float32 `json:"available_core_quota,omitempty"`
TotalCoreQuota float32 `json:"total_core_quota,omitempty"`
}

// NodeType encapsulates information about a given node type when using the list-node-types API
type NodeType struct {
NodeTypeID string `json:"node_type_id,omitempty"`
MemoryMb int32 `json:"memory_mb,omitempty"`
@@ -161,25 +174,28 @@ type NodeType struct {
NodeInfo *ClusterCloudProviderNodeInfo `json:"node_info,omitempty"`
}

// DockerBasicAuth contains the auth information when fetching containers
type DockerBasicAuth struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
}

// DockerImage contains the image URL and the auth for DCS (Databricks Container Services)
type DockerImage struct {
Url string `json:"url,omitempty"`
URL string `json:"url,omitempty"`
BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"`
}

// Cluster contains the information used when submitting cluster API calls or editing a cluster
type Cluster struct {
ClusterId string `json:"cluster_id,omitempty"`
NumWorkers int32 `json:"num_workers,omitempty"`
Autoscale *AutoScale `json:"autoscale,omitempty"`
ClusterName string `json:"cluster_name,omitempty"`
SparkVersion string `json:"spark_version,omitempty"`
SparkConf map[string]string `json:"spark_conf,omitempty"`
AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
NodeTypeID string `json:"node_type_id,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
NumWorkers int32 `json:"num_workers,omitempty"`
Autoscale *AutoScale `json:"autoscale,omitempty"`
ClusterName string `json:"cluster_name,omitempty"`
SparkVersion string `json:"spark_version,omitempty"`
SparkConf map[string]string `json:"spark_conf,omitempty"`
AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
NodeTypeID string `json:"node_type_id,omitempty"`
DriverNodeTypeID string `json:"driver_node_type_id,omitempty"`
SSHPublicKeys []string `json:"ssh_public_keys,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"`
@@ -189,10 +205,11 @@ type Cluster struct {
SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"`
EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
InstancePoolId string `json:"instance_pool_id,omitempty"`
InstancePoolID string `json:"instance_pool_id,omitempty"`
IdempotencyToken string `json:"idempotency_token,omitempty"`
}
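
A hedged sketch of building a cluster-create payload from this struct; the cluster name, Spark version and node type below are placeholder values, and the import path is assumed as above. Because every field carries omitempty, unset fields are dropped from the marshaled JSON.

package main

import (
    "encoding/json"
    "fmt"

    // Assumed import path; adjust to the module's real path.
    "github.com/databrickslabs/databricks-terraform/client/model"
)

func main() {
    cluster := model.Cluster{
        ClusterName:  "example-autoscaling-cluster", // placeholder values throughout
        SparkVersion: "6.4.x-scala2.11",
        NodeTypeID:   "i3.xlarge",
        Autoscale: &model.AutoScale{
            MinWorkers: 1,
            MaxWorkers: 4,
        },
        AwsAttributes: &model.AwsAttributes{
            Availability: model.AwsAvailabilitySpotWithFallback,
        },
        AutoterminationMinutes: 60,
    }
    body, err := json.MarshalIndent(cluster, "", "  ")
    if err != nil {
        panic(err)
    }
    // Zero-valued fields such as cluster_id and instance_pool_id are omitted
    // from the output because every field is tagged with omitempty.
    fmt.Println(string(body))
}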

// ClusterInfo contains the information returned by the cluster get request.
type ClusterInfo struct {
NumWorkers int32 `json:"num_workers,omitempty"`
AutoScale *AutoScale `json:"autoscale,omitempty"`
@@ -208,20 +225,20 @@ type ClusterInfo struct {
AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
NodeTypeID string `json:"node_type_id,omitempty"`
DriverNodeTypeID string `json:"driver_node_type_id,omitempty"`
SSHPublicKeys []string `json:"ssh_public_keys,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"`
ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"`
InitScripts []StorageInfo `json:"init_scripts,omitempty"`
SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"`
EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
InstancePoolId string `json:"instance_pool_id,omitempty"`
ClusterSource AwsAvailability `json:"cluster_source,omitempty"`
DockerImage *DockerImage `json:"docker_image,omitempty"`
State ClusterState `json:"state,omitempty"`
StateMessage string `json:"state_message,omitempty"`
StartTime int64 `json:"start_time,omitempty"`
TerminateTime int64 `json:"terminate_time,omitempty"`
SSHPublicKeys []string `json:"ssh_public_keys,omitempty"`
CustomTags map[string]string `json:"custom_tags,omitempty"`
ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"`
InitScripts []StorageInfo `json:"init_scripts,omitempty"`
SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"`
EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
InstancePoolID string `json:"instance_pool_id,omitempty"`
ClusterSource AwsAvailability `json:"cluster_source,omitempty"`
DockerImage *DockerImage `json:"docker_image,omitempty"`
State ClusterState `json:"state,omitempty"`
StateMessage string `json:"state_message,omitempty"`
StartTime int64 `json:"start_time,omitempty"`
TerminateTime int64 `json:"terminate_time,omitempty"`
LastStateLossTime int64 `json:"last_state_loss_time,omitempty"`
LastActivityTime int64 `json:"last_activity_time,omitempty"`
ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"`
10 changes: 3 additions & 7 deletions client/model/command.go
@@ -1,23 +1,19 @@
package model

// CommandResults is the output returned when a command finishes in API 1.2
type CommandResults struct {
ResultType string `json:"resultType,omitempty"`
Summary string `json:"summary,omitempty"`
Cause string `json:"cause,omitempty"`
Data interface{} `json:"data,omitempty"`
Schema interface{} `json:"schema,omitempty"`
Truncated bool `json:"truncated,omitempty"`
IsJsonSchema bool `json:"isJsonSchema,omitempty"`
IsJSONSchema bool `json:"isJsonSchema,omitempty"`
}

// Command is the struct that contains what the 1.2 API returns for the commands API
type Command struct {
ID string `json:"id,omitempty"`
Status string `json:"status,omitempty"`
Results *CommandResults `json:"results,omitempty"`
}

// ExecutionContext identifies a command execution context (context, cluster and language) in API 1.2
type ExecutionContext struct {
ContextId string `json:"contextId,omitempty"`
ClusterId string `json:"clusterId,omitempty"`
Language Language `json:"language,omitempty"`
}
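
A small sketch of decoding a 1.2 commands API response into these structs; the JSON body below is fabricated for illustration, and the import path is assumed as above. Note that the renamed IsJSONSchema field still maps to the wire name isJsonSchema through its struct tag.

package main

import (
    "encoding/json"
    "fmt"
    "log"

    // Assumed import path; adjust to the module's real path.
    "github.com/databrickslabs/databricks-terraform/client/model"
)

func main() {
    // Fabricated response body for illustration only.
    raw := []byte(`{"id":"1234-567890-abcde123","status":"Finished","results":{"resultType":"text","data":"hello","isJsonSchema":false}}`)

    var cmd model.Command
    if err := json.Unmarshal(raw, &cmd); err != nil {
        log.Fatalf("decoding command: %v", err)
    }
    fmt.Println(cmd.Status, cmd.Results.ResultType, cmd.Results.IsJSONSchema) // Finished text false
}
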
1 change: 1 addition & 0 deletions client/model/dbfs.go
@@ -1,5 +1,6 @@
package model

// FileInfo contains information when listing or fetching files from the DBFS API
type FileInfo struct {
Path string `json:"path,omitempty"`
IsDir bool `json:"is_dir,omitempty"`
12 changes: 12 additions & 0 deletions client/model/group.go
@@ -1,23 +1,33 @@
package model

// GroupMember contains information about a member of a SCIM group
type GroupMember struct {
Display string `json:"display,omitempty"`
Value string `json:"value,omitempty"`
Ref string `json:"$ref,omitempty"`
}

// ValueListItem is a struct that contains a Value field.
// It is used by the SCIM API.
type ValueListItem struct {
Value string `json:"value,omitempty"`
}

// GroupPathType describes the possible paths in the SCIM RFC for patch operations
type GroupPathType string

const (
// GroupMembersPath is the members path for SCIM patch operation.
GroupMembersPath GroupPathType = "members"

// GroupRolesPath is the roles path for SCIM patch operation.
GroupRolesPath GroupPathType = "roles"

// GroupEntitlementsPath is the entitlements path for SCIM patch operation.
GroupEntitlementsPath GroupPathType = "entitlements"
)

// Group contains information about the SCIM group
type Group struct {
ID string `json:"id,omitempty"`
Schemas []URN `json:"schemas,omitempty"`
@@ -30,6 +40,7 @@ type Group struct {
InheritedRoles []RoleListItem `json:"inherited_roles,omitempty"`
}

// GroupList contains a list of groups fetched from a SCIM API list call
type GroupList struct {
TotalResults int32 `json:"totalResults,omitempty"`
StartIndex int32 `json:"startIndex,omitempty"`
@@ -38,6 +49,7 @@ type GroupList struct {
Resources []Group `json:"resources,omitempty"`
}

// GroupPatchRequest contains the request structure for a patch operation against the SCIM API
type GroupPatchRequest struct {
Schemas []URN `json:"schemas,omitempty"`
Operations []GroupPatchOperations `json:"Operations,omitempty"`