diff --git a/databricks/provider.go b/databricks/provider.go index 1d1680bd1..5a9d42024 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -286,7 +286,7 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac config.Setup() //version information from go-releaser using -ldflags to tell the golang linker to send semver info - config.UserAgent = fmt.Sprintf("databricks-tf-provider-%s", providerVersion) + config.UserAgent = fmt.Sprintf("databricks-tf-provider/%s", providerVersion) if _, ok := d.GetOk("azure_auth"); !ok { if host, ok := d.GetOk("host"); ok { @@ -295,11 +295,7 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac if token, ok := d.GetOk("token"); ok { config.Token = token.(string) } - if config.Host == "" || config.Token == "" { - if err := tryDatabricksCliConfigFile(d, &config); err != nil { - return nil, fmt.Errorf("failed to get credentials from config file; error msg: %w", err) - } - } + // Basic authentication setup via username and password if _, ok := d.GetOk("basic_auth"); ok { username, userOk := d.GetOk("basic_auth.0.username") @@ -310,6 +306,14 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac config.AuthType = service.BasicAuth } } + + // Final catch-all in case basic_auth/token + host is not set up + if config.Host == "" || config.Token == "" { + if err := tryDatabricksCliConfigFile(d, &config); err != nil { + return nil, fmt.Errorf("failed to get credentials from config file; error msg: %w", err) + } + } + } else { // Abstracted logic to another function that returns a interface{}, error to inject directly // for the providers during cloud integration testing diff --git a/databricks/provider_test.go b/databricks/provider_test.go index ed428e25c..49273bf60 100644 --- a/databricks/provider_test.go +++ b/databricks/provider_test.go @@ -128,6 +128,20 @@ func TestProvider_HostTokensTakePrecedence(t *testing.T) { assert.Equal(t, 
"configured", client.Token) } +func TestProvider_BasicAuthTakePrecedence(t *testing.T) { + var raw = make(map[string]interface{}) + raw["host"] = "foo" + raw["basic_auth"] = []interface{}{map[string]interface{}{"username": "user", "password": "pass"}} + raw["config_file"] = "testdata/.databrickscfg" + err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw)) + assert.Nil(t, err) + + // Basic auth convention + expectedToken := base64.StdEncoding.EncodeToString([]byte("user:pass")) + client := testAccProvider.Meta().(*service.DBApiClient).Config + assert.Equal(t, expectedToken, client.Token) +} + func TestProvider_MissingEnvMakesConfigRead(t *testing.T) { var raw = make(map[string]interface{}) raw["token"] = "configured" diff --git a/databricks/resource_databricks_job_aws_test.go b/databricks/resource_databricks_job_aws_test.go index 011b89db7..972e08cea 100644 --- a/databricks/resource_databricks_job_aws_test.go +++ b/databricks/resource_databricks_job_aws_test.go @@ -95,20 +95,20 @@ func testAwsJobValuesNewCluster(t *testing.T, job *model.Job) resource.TestCheck assert.NotNil(t, job.Settings.NotebookTask) assert.Equal(t, 2, int(job.Settings.NewCluster.Autoscale.MinWorkers)) assert.Equal(t, 3, int(job.Settings.NewCluster.Autoscale.MaxWorkers)) - assert.Equal(t, "6.4.x-scala2.11", job.Settings.NewCluster.SparkVersion) - assert.Equal(t, model.AwsAvailability(model.AwsAvailabilitySpot), job.Settings.NewCluster.AwsAttributes.Availability) - assert.Equal(t, "us-east-1a", job.Settings.NewCluster.AwsAttributes.ZoneID) - assert.Equal(t, 100, int(job.Settings.NewCluster.AwsAttributes.SpotBidPricePercent)) - assert.Equal(t, 1, int(job.Settings.NewCluster.AwsAttributes.FirstOnDemand)) - assert.Equal(t, model.EbsVolumeType(model.EbsVolumeTypeGeneralPurposeSsd), job.Settings.NewCluster.AwsAttributes.EbsVolumeType) - assert.Equal(t, 1, int(job.Settings.NewCluster.AwsAttributes.EbsVolumeCount)) - assert.Equal(t, 32, 
int(job.Settings.NewCluster.AwsAttributes.EbsVolumeSize)) - assert.Equal(t, "r3.xlarge", job.Settings.NewCluster.NodeTypeID) - assert.Equal(t, "/Users/jane.doe@databricks.com/my-demo-notebook", job.Settings.NotebookTask.NotebookPath) - assert.Equal(t, "my-demo-notebook", job.Settings.Name) - assert.Equal(t, 3600, int(job.Settings.TimeoutSeconds)) - assert.Equal(t, 1, int(job.Settings.MaxRetries)) - assert.Equal(t, 1, int(job.Settings.MaxConcurrentRuns)) + assert.Equal(t, "6.4.x-scala2.11", job.Settings.NewCluster.SparkVersion) + assert.Equal(t, model.AwsAvailability(model.AwsAvailabilitySpot), job.Settings.NewCluster.AwsAttributes.Availability) + assert.Equal(t, "us-east-1a", job.Settings.NewCluster.AwsAttributes.ZoneID) + assert.Equal(t, 100, int(job.Settings.NewCluster.AwsAttributes.SpotBidPricePercent)) + assert.Equal(t, 1, int(job.Settings.NewCluster.AwsAttributes.FirstOnDemand)) + assert.Equal(t, model.EbsVolumeType(model.EbsVolumeTypeGeneralPurposeSsd), job.Settings.NewCluster.AwsAttributes.EbsVolumeType) + assert.Equal(t, 1, int(job.Settings.NewCluster.AwsAttributes.EbsVolumeCount)) + assert.Equal(t, 32, int(job.Settings.NewCluster.AwsAttributes.EbsVolumeSize)) + assert.Equal(t, "r3.xlarge", job.Settings.NewCluster.NodeTypeID) + assert.Equal(t, "/Users/jane.doe@databricks.com/my-demo-notebook", job.Settings.NotebookTask.NotebookPath) + assert.Equal(t, "my-demo-notebook", job.Settings.Name) + assert.Equal(t, 3600, int(job.Settings.TimeoutSeconds)) + assert.Equal(t, 1, int(job.Settings.MaxRetries)) + assert.Equal(t, 1, int(job.Settings.MaxConcurrentRuns)) return nil } } diff --git a/databricks/resource_databricks_job_azure_test.go b/databricks/resource_databricks_job_azure_test.go index 5347063fd..2abab1ec0 100644 --- a/databricks/resource_databricks_job_azure_test.go +++ b/databricks/resource_databricks_job_azure_test.go @@ -94,13 +94,13 @@ func testAzureJobValuesNewCluster(t *testing.T, job *model.Job) resource.TestChe assert.NotNil(t, 
job.Settings.NotebookTask) assert.Equal(t, 2, int(job.Settings.NewCluster.Autoscale.MinWorkers)) assert.Equal(t, 3, int(job.Settings.NewCluster.Autoscale.MaxWorkers)) - assert.Equal(t, "6.4.x-scala2.11", job.Settings.NewCluster.SparkVersion) - assert.Equal(t, "Standard_DS3_v2", job.Settings.NewCluster.NodeTypeID) - assert.Equal(t, "/Users/jane.doe@databricks.com/my-demo-notebook", job.Settings.NotebookTask.NotebookPath) - assert.Equal(t, "my-demo-notebook", job.Settings.Name) - assert.Equal(t, 3600, int(job.Settings.TimeoutSeconds)) - assert.Equal(t, 1, int(job.Settings.MaxRetries)) - assert.Equal(t, 1, int(job.Settings.MaxConcurrentRuns)) + assert.Equal(t, "6.4.x-scala2.11", job.Settings.NewCluster.SparkVersion) + assert.Equal(t, "Standard_DS3_v2", job.Settings.NewCluster.NodeTypeID) + assert.Equal(t, "/Users/jane.doe@databricks.com/my-demo-notebook", job.Settings.NotebookTask.NotebookPath) + assert.Equal(t, "my-demo-notebook", job.Settings.Name) + assert.Equal(t, 3600, int(job.Settings.TimeoutSeconds)) + assert.Equal(t, 1, int(job.Settings.MaxRetries)) + assert.Equal(t, 1, int(job.Settings.MaxConcurrentRuns)) return nil } }