Commit 55cd215

added go formatting

stikkireddy committed Apr 19, 2020
1 parent e1f6f16 commit 55cd215
Showing 7 changed files with 44 additions and 58 deletions.
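Context for the diff below: this commit is a mechanical gofmt pass, so the changed lines differ mostly in padding (field names in a contiguous block are padded so the types or values line up in one column) plus removal of stray blank lines. A minimal sketch of that alignment rule, using the standard go/format package, which applies the same canonical formatting as the gofmt tool; the struct here is illustrative, not from this repo:

package main

import (
    "fmt"
    "go/format"
)

func main() {
    // Unaligned input, the shape of the code before this commit.
    src := []byte("package p\n\ntype T struct {\n\tHost string\n\tUserAgent string\n}\n")

    // format.Source formats src in canonical gofmt style, padding the
    // field names of a contiguous block so the types start in one column.
    out, err := format.Source(src)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // Prints the package with the fields aligned:
    //   Host      string
    //   UserAgent string
}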
21 changes: 10 additions & 11 deletions client/service/client.go
@@ -23,23 +23,22 @@ const (
 
 type DBApiErrorBody struct {
     ErrorCode  string `json:"error_code,omitempty"`
-    Message string `json:"message,omitempty"`
+    Message    string `json:"message,omitempty"`
     // The following two are for scim api only for RFC 7644 Section 3.7.3 https://tools.ietf.org/html/rfc7644#section-3.7.3
     ScimDetail string `json:"detail,omitempty"`
     ScimStatus string `json:"status,omitempty"`
 }
 
 type DBApiError struct {
-    ErrorBody *DBApiErrorBody
+    ErrorBody  *DBApiErrorBody
     StatusCode int
-    Err error
+    Err        error
 }
 
 func (r DBApiError) Error() string {
     return fmt.Sprintf("status %d: err %v", r.StatusCode, r.Err)
 }
 
-
 type AuthType string
 
 const (
@@ -50,8 +49,8 @@ const (
 type DBApiClientConfig struct {
     Host               string
     Token              string
-    AuthType AuthType
-    UserAgent string
+    AuthType           AuthType
+    UserAgent          string
     DefaultHeaders     map[string]string
     InsecureSkipVerify bool
     TimeoutSeconds     int
@@ -139,8 +138,8 @@ func onlyNBytes(j string, numBytes int64) string {
 
 func auditNonGetPayload(method string, uri string, object interface{}, mask *SecretsMask) {
     logStmt := struct {
-        Method string
-        Uri string
+        Method  string
+        Uri     string
         Payload interface{}
     }{
         Method: method,
@@ -158,10 +157,10 @@ func auditNonGetPayload(method string, uri string, object interface{}, mask *SecretsMask) {
 func auditGetPayload(uri string, mask *SecretsMask) {
     logStmt := struct {
         Method string
-        Uri string
+        Uri    string
     }{
-        Method: "GET",
-        Uri: uri,
+        Method: "GET",
+        Uri:    uri,
     }
     jsonStr, _ := json.Marshal(Mask(logStmt))
     if mask != nil {
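Aside (not part of the commit): DBApiError above follows the standard Go pattern of satisfying the built-in error interface by defining an Error() string method, so a status code can travel with the wrapped error. A minimal, self-contained sketch of the same shape, with a hypothetical apiError standing in for DBApiError:

package main

import (
    "errors"
    "fmt"
)

// apiError is a stand-in for DBApiError: any type with an
// Error() string method satisfies the built-in error interface.
type apiError struct {
    StatusCode int
    Err        error
}

func (e apiError) Error() string {
    return fmt.Sprintf("status %d: err %v", e.StatusCode, e.Err)
}

func main() {
    var err error = apiError{StatusCode: 404, Err: errors.New("cluster not found")}
    fmt.Println(err) // status 404: err cluster not found
}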
21 changes: 10 additions & 11 deletions client/service/clusters_integration_test.go
@@ -21,9 +21,9 @@ func TestListClustersIntegration(t *testing.T) {
             "PYSPARK_PYTHON": "/databricks/python3/bin/python3",
         },
         AwsAttributes: &model.AwsAttributes{
-            EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
-            EbsVolumeCount: 1,
-            EbsVolumeSize: 32,
+            EbsVolumeType:  model.EbsVolumeTypeGeneralPurposeSsd,
+            EbsVolumeCount: 1,
+            EbsVolumeSize:  32,
         },
         SparkVersion: "6.2.x-scala2.11",
         NodeTypeID: GetCloudInstanceType(client),
@@ -37,14 +37,13 @@ func TestListClustersIntegration(t *testing.T) {
 
     clusterReadInfo, err := client.Clusters().Get(clusterInfo.ClusterID)
     assert.NoError(t, err, err)
-    assert.True(t, clusterReadInfo.NumWorkers==cluster.NumWorkers)
-    assert.True(t, clusterReadInfo.ClusterName==cluster.ClusterName)
+    assert.True(t, clusterReadInfo.NumWorkers == cluster.NumWorkers)
+    assert.True(t, clusterReadInfo.ClusterName == cluster.ClusterName)
     assert.True(t, reflect.DeepEqual(clusterReadInfo.SparkEnvVars, cluster.SparkEnvVars))
-    assert.True(t, clusterReadInfo.SparkVersion==cluster.SparkVersion)
-    assert.True(t, clusterReadInfo.NodeTypeID==cluster.NodeTypeID)
-    assert.True(t, clusterReadInfo.DriverNodeTypeID==cluster.DriverNodeTypeID)
-    assert.True(t, clusterReadInfo.AutoterminationMinutes==cluster.AutoterminationMinutes)
-
+    assert.True(t, clusterReadInfo.SparkVersion == cluster.SparkVersion)
+    assert.True(t, clusterReadInfo.NodeTypeID == cluster.NodeTypeID)
+    assert.True(t, clusterReadInfo.DriverNodeTypeID == cluster.DriverNodeTypeID)
+    assert.True(t, clusterReadInfo.AutoterminationMinutes == cluster.AutoterminationMinutes)
 
     defer func() {
         err = client.Clusters().Delete(clusterReadInfo.ClusterID)
@@ -74,4 +73,4 @@ func TestListClustersIntegration(t *testing.T) {
     assert.NoError(t, err, err)
     assert.True(t, clusterReadInfo.State == model.ClusterStateRunning)
 
-}
\ No newline at end of file
+}
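Aside (not part of the commit): the test above compares fields with assert.True(t, a == b). testify's assert.Equal is often the better fit for this, since on failure it reports both the expected and the actual value, while assert.True can only say the condition was false. A sketch under that assumption:

package service

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

// Sketch only: both assertions pass here, but on failure assert.Equal
// prints the expected and actual values, whereas assert.True reports
// nothing beyond "should be true".
func TestComparisonStyle(t *testing.T) {
    got, want := "6.2.x-scala2.11", "6.2.x-scala2.11"
    assert.True(t, got == want)
    assert.Equal(t, want, got)
}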
11 changes: 4 additions & 7 deletions client/service/commands_integration_test.go
@@ -20,9 +20,9 @@ func TestContext(t *testing.T) {
             "PYSPARK_PYTHON": "/databricks/python3/bin/python3",
         },
         AwsAttributes: &model.AwsAttributes{
-            EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
-            EbsVolumeCount: 1,
-            EbsVolumeSize: 32,
+            EbsVolumeType:  model.EbsVolumeTypeGeneralPurposeSsd,
+            EbsVolumeCount: 1,
+            EbsVolumeSize:  32,
         },
         SparkVersion: "6.2.x-scala2.11",
         NodeTypeID: GetCloudInstanceType(client),
@@ -31,15 +31,13 @@ func TestContext(t *testing.T) {
         AutoterminationMinutes: 20,
     }
 
-
     clusterInfo, err := client.Clusters().Create(cluster)
     assert.NoError(t, err, err)
     defer func() {
         err := client.Clusters().PermanentDelete(clusterInfo.ClusterID)
         assert.NoError(t, err, err)
     }()
 
-
     clusterId := clusterInfo.ClusterID
 
     err = client.Clusters().WaitForClusterRunning(clusterId, 10, 20)
@@ -49,7 +47,6 @@ func TestContext(t *testing.T) {
     assert.NoError(t, err, err)
     t.Log(context)
 
-
     err = client.Commands().waitForContextReady(context, clusterId, 1, 1)
     assert.NoError(t, err, err)
 
@@ -72,4 +69,4 @@ func TestContext(t *testing.T) {
     command, err := client.Commands().Execute(clusterId, "python", "print('hello world')")
     assert.NoError(t, err, err)
     assert.NotNil(t, command.Results.Data)
-}
\ No newline at end of file
+}
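Aside (not part of the commit): TestContext registers PermanentDelete with defer immediately after Create. A minimal, self-contained sketch of why that ordering matters: a deferred call runs when the enclosing function returns, whatever the exit path, so the cleanup still fires if a later step fails.

package main

import "fmt"

func main() {
    id := "cluster-123" // hypothetical resource ID
    defer func() {
        // Runs last, on every exit path of this function.
        fmt.Println("cleanup:", id)
    }()
    fmt.Println("work:", id)
    // Output:
    // work: cluster-123
    // cleanup: cluster-123
}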
10 changes: 3 additions & 7 deletions client/service/groups_integration_test.go
@@ -92,13 +92,10 @@ func TestReadInheritedRolesFromGroup(t *testing.T) {
         assert.NoError(t, err, err)
     }()
 
-
-    err = client.Groups().Patch(myTestGroup.ID, []string{myTestRole}, nil , model.GroupRolesPath)
+    err = client.Groups().Patch(myTestGroup.ID, []string{myTestRole}, nil, model.GroupRolesPath)
     assert.NoError(t, err, err)
 
-
-
-    err = client.Groups().Patch(myTestGroup.ID, []string{myTestSubGroup.ID}, nil , model.GroupMembersPath)
+    err = client.Groups().Patch(myTestGroup.ID, []string{myTestSubGroup.ID}, nil, model.GroupMembersPath)
     assert.NoError(t, err, err)
 
     myTestGroupInfo, err := client.Groups().Read(myTestSubGroup.ID)
@@ -112,7 +109,6 @@ func TestReadInheritedRolesFromGroup(t *testing.T) {
             }
         }
         return false
-    }(myTestGroupInfo.InheritedRoles, myTestRole) )
-
+    }(myTestGroupInfo.InheritedRoles, myTestRole))
 
 }
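Aside (not part of the commit): the closing lines above belong to an immediately invoked closure used as an inline containment check inside assert.True. A self-contained sketch of the same shape, with a plain string slice standing in for the model package's role type:

package main

import "fmt"

func main() {
    inheritedRoles := []string{"arn:aws:iam::123:role/a", "arn:aws:iam::123:role/b"}
    myTestRole := "arn:aws:iam::123:role/b"

    // The closure is defined and called in a single expression, so the
    // loop and its variables stay scoped to this one check.
    found := func(roles []string, want string) bool {
        for _, r := range roles {
            if r == want {
                return true
            }
        }
        return false
    }(inheritedRoles, myTestRole)

    fmt.Println(found) // true
}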
22 changes: 11 additions & 11 deletions client/service/instance_pools_integration_test.go
@@ -13,12 +13,12 @@ func TestInstancePools(t *testing.T) {
     client := GetIntegrationDBAPIClient()
 
     pool := model.InstancePool{
-        InstancePoolName: "my_instance_pool",
-        MinIdleInstances: 0,
-        MaxCapacity: 10,
+        InstancePoolName: "my_instance_pool",
+        MinIdleInstances: 0,
+        MaxCapacity:      10,
         DiskSpec: &model.InstancePoolDiskSpec{
-            DiskType: &model.InstancePoolDiskType{
-                EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
+            DiskType: &model.InstancePoolDiskType{
+                EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
             },
             DiskCount: 1,
             DiskSize: 32,
@@ -47,13 +47,13 @@ func TestInstancePools(t *testing.T) {
     assert.Equal(t, pool.IdleInstanceAutoTerminationMinutes, poolReadInfo.IdleInstanceAutoTerminationMinutes)
 
     err = client.InstancePools().Update(model.InstancePoolInfo{
-        InstancePoolId: poolReadInfo.InstancePoolId,
-        InstancePoolName: "my_instance_pool",
-        MinIdleInstances: 0,
-        MaxCapacity: 20,
+        InstancePoolId:   poolReadInfo.InstancePoolId,
+        InstancePoolName: "my_instance_pool",
+        MinIdleInstances: 0,
+        MaxCapacity:      20,
         DiskSpec: &model.InstancePoolDiskSpec{
-            DiskType: &model.InstancePoolDiskType{
-                EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
+            DiskType: &model.InstancePoolDiskType{
+                EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
             },
             DiskCount: 1,
             DiskSize: 32,
11 changes: 3 additions & 8 deletions client/service/libraries_integration_test.go
@@ -20,9 +20,9 @@ func TestLibraryCreate(t *testing.T) {
             "PYSPARK_PYTHON": "/databricks/python3/bin/python3",
         },
         AwsAttributes: &model.AwsAttributes{
-            EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd,
-            EbsVolumeCount: 1,
-            EbsVolumeSize: 32,
+            EbsVolumeType:  model.EbsVolumeTypeGeneralPurposeSsd,
+            EbsVolumeCount: 1,
+            EbsVolumeSize:  32,
         },
         SparkVersion: "6.2.x-scala2.11",
         NodeTypeID: GetCloudInstanceType(client),
@@ -31,21 +31,18 @@ func TestLibraryCreate(t *testing.T) {
         AutoterminationMinutes: 20,
     }
 
-
     clusterInfo, err := client.Clusters().Create(cluster)
     assert.NoError(t, err, err)
    defer func() {
         err := client.Clusters().PermanentDelete(clusterInfo.ClusterID)
         assert.NoError(t, err, err)
     }()
 
-
     clusterId := clusterInfo.ClusterID
 
     err = client.Clusters().WaitForClusterRunning(clusterId, 10, 20)
     assert.NoError(t, err, err)
 
-
     libraries := []model.Library{
         {
             Pypi: &model.PyPi{
@@ -67,8 +64,6 @@ func TestLibraryCreate(t *testing.T) {
         assert.NoError(t, err, err)
     }()
 
-
-
     libraryStatusList, err := client.Libraries().List(clusterId)
     assert.NoError(t, err, err)
     assert.Equal(t, len(libraryStatusList), len(libraries))
6 changes: 3 additions & 3 deletions client/service/mask_utils.go
@@ -44,7 +44,7 @@ func maskRecursive(copy, original reflect.Value, mask bool) {
     // we would end up with an actual pointer
     case reflect.Interface:
         // Get rid of the wrapping interface
-        if !original.IsZero(){
+        if !original.IsZero() {
             originalValue := original.Elem()
             // Create a new object. Now new gives us a pointer, but we want the value it
             // points to, so we have to call Elem() to unwrap it
@@ -58,7 +58,7 @@ func maskRecursive(copy, original reflect.Value, mask bool) {
         for i := 0; i < original.NumField(); i += 1 {
             //log.Println()
             maskValue, maskInStruct := original.Type().Field(i).Tag.Lookup("mask")
-            maskIsTrue, _ := strconv.ParseBool(maskValue)
+            maskIsTrue, _ := strconv.ParseBool(maskValue)
             maskRecursive(copy.Field(i), original.Field(i), maskInStruct && maskIsTrue)
         }
 
@@ -107,4 +107,4 @@ func (a SecretsMask) MaskString(str string) string {
         placeHolder = strings.ReplaceAll(placeHolder, secret, "[REDACTED]")
     }
     return placeHolder
-}
\ No newline at end of file
+}
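Aside (not part of the commit): maskRecursive above decides whether to redact a field by reading a struct tag named mask via reflection and parsing it with strconv.ParseBool, exactly as the Tag.Lookup line in the hunk shows. A minimal sketch of that tag lookup, with a hypothetical creds struct; only the mask tag convention is taken from mask_utils.go:

package main

import (
    "fmt"
    "reflect"
    "strconv"
)

// creds is hypothetical; the `mask` tag mirrors what maskRecursive reads.
type creds struct {
    Host  string `mask:"false"`
    Token string `mask:"true"`
}

func main() {
    t := reflect.TypeOf(creds{})
    for i := 0; i < t.NumField(); i++ {
        maskValue, tagged := t.Field(i).Tag.Lookup("mask")
        maskIsTrue, _ := strconv.ParseBool(maskValue)
        fmt.Printf("%s: tagged=%v mask=%v\n", t.Field(i).Name, tagged, maskIsTrue)
    }
    // Host: tagged=true mask=false
    // Token: tagged=true mask=true
}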
