Skip to content

Commit

Permalink
Merge branch 'ds/fix-mountclustermissing' of https://github.com/store…
Browse files Browse the repository at this point in the history
…y247/databricks-terraform into ds/fix-mountclustermissing
  • Loading branch information
Dave Storey committed Jun 10, 2020
2 parents 11885f2 + d6b3504 commit 164ea0b
Show file tree
Hide file tree
Showing 218 changed files with 7,036 additions and 13,014 deletions.
60 changes: 59 additions & 1 deletion databricks/provider.go
Expand Up @@ -9,6 +9,9 @@ import (
"github.com/databrickslabs/databricks-terraform/client/service"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"

homedir "github.com/mitchellh/go-homedir"
ini "gopkg.in/ini.v1"
)

// Provider returns the entire terraform provider object
Expand Down Expand Up @@ -80,6 +83,22 @@ func Provider(version string) terraform.ResourceProvider {
},
ConflictsWith: []string{"token"},
},
"config_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_CONFIG_FILE", "~/.databrickscfg"),
Description: "Location of the Databricks CLI credentials file, that is created\n" +
"by `databricks configure --token` command. By default, it is located\n" +
"in ~/.databrickscfg. Check https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication for docs. Config\n" +
"file credetials will only be used when host/token are not provided.",
},
"profile": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "DEFAULT",
Description: "Connection profile specified within ~/.databrickscfg. Please check\n" +
"https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles for documentation.",
},
"azure_auth": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Expand Down Expand Up @@ -226,6 +245,41 @@ func providerConfigureAzureClient(d *schema.ResourceData, providerVersion string
return &dbClient, nil
}

// tryDatabricksCliConfigFile sets Host and Token on config from the Databricks
// CLI credentials file (~/.databrickscfg by default) if it exists.
//
// The file location comes from the "config_file" provider attribute and the
// section to read from the "profile" attribute. It returns an error when the
// file cannot be loaded or when the selected profile lacks a host or token.
func tryDatabricksCliConfigFile(d *schema.ResourceData, config *service.DBApiClientConfig) error {
	configFile, err := homedir.Expand(d.Get("config_file").(string))
	if err != nil {
		return err
	}
	cfg, err := ini.Load(configFile)
	if err != nil {
		// Wrap the underlying ini error with %w instead of discarding it, so
		// callers (and users) can still see the real root cause.
		return fmt.Errorf("Authentication is not configured for provider. Please configure it\n"+
			"through one of the following options:\n"+
			"1. DATABRICKS_HOST + DATABRICKS_TOKEN environment variables.\n"+
			"2. host + token provider arguments.\n"+
			"3. Run `databricks configure --token` that will create %s file.\n\n"+
			"Please check https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication for details: %w", configFile, err)
	}
	// "profile" has a default ("DEFAULT"), so GetOk normally succeeds.
	if profile, ok := d.GetOk("profile"); ok {
		dbcliConfig := cfg.Section(profile.(string))
		token := dbcliConfig.Key("token").String()
		if token == "" {
			return fmt.Errorf("config file %s is corrupt: cannot find token in %s profile",
				configFile, profile)
		}
		config.Token = token

		host := dbcliConfig.Key("host").String()
		if host == "" {
			return fmt.Errorf("config file %s is corrupt: cannot find host in %s profile",
				configFile, profile)
		}
		config.Host = host
	}

	return nil
}

func providerConfigure(d *schema.ResourceData, providerVersion string) (interface{}, error) {
var config service.DBApiClientConfig
// Call setup to configure retryable httpclient
Expand All @@ -241,7 +295,11 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac
if token, ok := d.GetOk("token"); ok {
config.Token = token.(string)
}

if config.Host == "" || config.Token == "" {
if err := tryDatabricksCliConfigFile(d, &config); err != nil {
return nil, fmt.Errorf("failed to get credentials from config file; error msg: %w", err)
}
}
// Basic authentication setup via username and password
if _, ok := d.GetOk("basic_auth"); ok {
username, userOk := d.GetOk("basic_auth.0.username")
Expand Down
91 changes: 89 additions & 2 deletions databricks/provider_test.go
Expand Up @@ -3,15 +3,18 @@ package databricks
import (
"encoding/base64"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"log"
"os"
"testing"

"github.com/databrickslabs/databricks-terraform/client/service"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"

"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"

"github.com/databrickslabs/databricks-terraform/client/service"
)

var testAccProviders map[string]terraform.ResourceProvider
Expand Down Expand Up @@ -98,3 +101,87 @@ resource "databricks_scim_group" "my-group-azure3" {
}
`
}

// TestProvider runs Terraform's built-in schema validation on the provider.
func TestProvider(t *testing.T) {
	err := testAccProvider.InternalValidate()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}

// TestProvider_NoOptionsResultsInError checks that configuring a fresh
// provider with only a non-existent config file yields an error.
func TestProvider_NoOptionsResultsInError(t *testing.T) {
	provider := Provider("")
	raw := map[string]interface{}{
		"config_file": "testdata/.databrickscfg_non_existant",
	}
	err := provider.Configure(terraform.NewResourceConfigRaw(raw))
	assert.NotNil(t, err)
}

// TestProvider_HostTokensTakePrecedence verifies that explicitly supplied
// host/token arguments win over credentials in the CLI config file.
func TestProvider_HostTokensTakePrecedence(t *testing.T) {
	raw := map[string]interface{}{
		"host":        "foo",
		"token":       "configured",
		"config_file": "testdata/.databrickscfg",
	}
	err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw))
	assert.Nil(t, err)

	client := testAccProvider.Meta().(*service.DBApiClient).Config
	assert.Equal(t, "configured", client.Token)
}

// TestProvider_MissingEnvMakesConfigRead verifies that when host is absent,
// credentials are read from the CLI config file (replacing the given token).
func TestProvider_MissingEnvMakesConfigRead(t *testing.T) {
	raw := map[string]interface{}{
		"token":       "configured",
		"config_file": "testdata/.databrickscfg",
	}
	err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw))
	assert.Nil(t, err)

	client := testAccProvider.Meta().(*service.DBApiClient).Config
	assert.Equal(t, "PT0+IC9kZXYvdXJhbmRvbSA8PT0KYFZ", client.Token)
}

// TestProvider_NoHostGivesError expects configuration to fail when the
// selected profile has a token but no host entry.
func TestProvider_NoHostGivesError(t *testing.T) {
	raw := map[string]interface{}{
		"config_file": "testdata/.databrickscfg",
		"profile":     "nohost",
	}
	err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw))
	assert.NotNil(t, err)
}

// TestProvider_NoTokenGivesError expects configuration to fail when the
// selected profile has a host but no token entry.
func TestProvider_NoTokenGivesError(t *testing.T) {
	raw := map[string]interface{}{
		"config_file": "testdata/.databrickscfg",
		"profile":     "notoken",
	}
	err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw))
	assert.NotNil(t, err)
}

// TestProvider_InvalidProfileGivesError expects configuration to fail for a
// profile whose entries are invalid.
func TestProvider_InvalidProfileGivesError(t *testing.T) {
	raw := map[string]interface{}{
		"config_file": "testdata/.databrickscfg",
		"profile":     "invalidhost",
	}
	err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw))
	assert.NotNil(t, err)
}

// TestProvider_InvalidConfigFilePath ensures an unreadable config file path
// surfaces as a configuration error.
func TestProvider_InvalidConfigFilePath(t *testing.T) {
	raw := map[string]interface{}{
		"config_file": "testdata/.invalid file",
		"profile":     "invalidhost",
	}
	err := testAccProvider.Configure(terraform.NewResourceConfigRaw(raw))
	log.Println(err)
	assert.NotNil(t, err)
}

// TestAccDatabricksCliConfigWorks is an acceptance test verifying that an
// empty provider block can be configured purely from the CLI config file.
func TestAccDatabricksCliConfigWorks(t *testing.T) {
	steps := []resource.TestStep{
		{
			Config:             `provider "databricks" {}`,
			ExpectNonEmptyPlan: true,
		},
	}
	resource.Test(t, resource.TestCase{
		Providers: testAccProviders,
		Steps:     steps,
	})
}
2 changes: 1 addition & 1 deletion databricks/resource_databricks_cluster.go
Expand Up @@ -1237,7 +1237,7 @@ func parseSchemaToCluster(d *schema.ResourceData, schemaAttPrefix string) model.
}
if ebsVolumeCount, ok := awsAttributesMap["ebs_volume_count"]; ok {
//val, _ := strconv.ParseInt(ebsVolumeCount.(string), 10, 32)
awsAttributes.FirstOnDemand = int32(ebsVolumeCount.(int))
awsAttributes.EbsVolumeCount = int32(ebsVolumeCount.(int))
}
if ebsVolumeSize, ok := awsAttributesMap["ebs_volume_size"]; ok {
//val, _ := strconv.ParseInt(ebsVolumeSize.(string), 10, 32)
Expand Down
2 changes: 1 addition & 1 deletion databricks/resource_databricks_job.go
Expand Up @@ -982,7 +982,7 @@ func parseSchemaToJobSettings(d *schema.ResourceData) model.JobSettings {
}

if _, ok := d.GetOk("new_cluster.0"); ok {
cluster := parseSchemaToCluster(d, "new_cluster.0")
cluster := parseSchemaToCluster(d, "new_cluster.0.")
jobSettings.NewCluster = &cluster
}

Expand Down
143 changes: 143 additions & 0 deletions databricks/resource_databricks_job_aws_test.go
@@ -0,0 +1,143 @@
package databricks

import (
"errors"
"fmt"
"github.com/databrickslabs/databricks-terraform/client/model"
"github.com/databrickslabs/databricks-terraform/client/service"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/stretchr/testify/assert"
"strconv"
"testing"
)

// TestAccAwsJobResource is an acceptance test that creates an AWS job backed
// by a new cluster and verifies the remote job matches the configuration.
func TestAccAwsJobResource(t *testing.T) {
	var job model.Job

	// One step: apply the config, then check that the job exists remotely
	// and that its settings match the HCL from testAwsJobResourceNewCluster.
	step := resource.TestStep{
		Config: testAwsJobResourceNewCluster(),
		Check: resource.ComposeTestCheckFunc(
			testAwsJobResourceExists("databricks_job.my_job", &job, t),
			testAwsJobValuesNewCluster(t, &job),
		),
		Destroy: false,
	}

	resource.Test(t, resource.TestCase{
		Providers:    testAccProviders,
		CheckDestroy: testAwsJobResourceDestroy,
		Steps:        []resource.TestStep{step},
	})
}

// testAwsJobResourceDestroy verifies that every databricks_job resource in
// the test state has actually been removed from the workspace.
func testAwsJobResourceDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*service.DBApiClient)
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "databricks_job" {
			continue
		}
		idInt, err := strconv.ParseInt(rs.Primary.ID, 10, 32)
		if err != nil {
			return err
		}
		// A successful read means the job still exists remotely. A read
		// error is treated as "already deleted"; keep checking the remaining
		// resources instead of returning early (the original returned nil
		// after the first deleted job, silently skipping any leftovers).
		if _, err = client.Jobs().Read(idInt); err == nil {
			return errors.New("resource job is not cleaned up")
		}
	}
	return nil
}

// testAwsJobResourceExists queries the Jobs API for the job recorded in state
// under name n and, on success, copies the API response into job.
func testAwsJobResourceExists(n string, job *model.Job, t *testing.T) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Look up the resource's state object by its address.
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		jobID, err := strconv.ParseInt(rs.Primary.ID, 10, 32)
		if err != nil {
			return err
		}

		// Fetch the job through the configured test client.
		conn := testAccProvider.Meta().(*service.DBApiClient)
		resp, err := conn.Jobs().Read(jobID)
		if err != nil {
			return err
		}

		*job = resp
		return nil
	}
}

// testAwsJobValuesNewCluster asserts that the remote job's settings match the
// resource definition produced by testAwsJobResourceNewCluster.
func testAwsJobValuesNewCluster(t *testing.T, job *model.Job) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Structural presence checks first, so the field assertions below
		// cannot dereference a nil pointer.
		assert.NotNil(t, job.Settings)
		assert.NotNil(t, job.Settings.NewCluster)
		assert.NotNil(t, job.Settings.NewCluster.Autoscale)
		assert.NotNil(t, job.Settings.NewCluster.AwsAttributes)
		assert.NotNil(t, job.Settings.NotebookTask)
		// Cluster sizing and runtime version.
		assert.Equal(t, 2, int(job.Settings.NewCluster.Autoscale.MinWorkers))
		assert.Equal(t, 3, int(job.Settings.NewCluster.Autoscale.MaxWorkers))
		assert.Equal(t, "6.4.x-scala2.11", job.Settings.NewCluster.SparkVersion)
		// AWS-specific attributes from the aws_attributes block.
		assert.Equal(t, model.AwsAvailability(model.AwsAvailabilitySpot), job.Settings.NewCluster.AwsAttributes.Availability)
		assert.Equal(t, "us-east-1a", job.Settings.NewCluster.AwsAttributes.ZoneID)
		assert.Equal(t, 100, int(job.Settings.NewCluster.AwsAttributes.SpotBidPricePercent))
		assert.Equal(t, 1, int(job.Settings.NewCluster.AwsAttributes.FirstOnDemand))
		assert.Equal(t, model.EbsVolumeType(model.EbsVolumeTypeGeneralPurposeSsd), job.Settings.NewCluster.AwsAttributes.EbsVolumeType)
		assert.Equal(t, 1, int(job.Settings.NewCluster.AwsAttributes.EbsVolumeCount))
		assert.Equal(t, 32, int(job.Settings.NewCluster.AwsAttributes.EbsVolumeSize))
		assert.Equal(t, "r3.xlarge", job.Settings.NewCluster.NodeTypeID)
		// Job-level settings: notebook task, name, and retry/concurrency limits.
		assert.Equal(t, "/Users/jane.doe@databricks.com/my-demo-notebook", job.Settings.NotebookTask.NotebookPath)
		assert.Equal(t, "my-demo-notebook", job.Settings.Name)
		assert.Equal(t, 3600, int(job.Settings.TimeoutSeconds))
		assert.Equal(t, 1, int(job.Settings.MaxRetries))
		assert.Equal(t, 1, int(job.Settings.MaxConcurrentRuns))
		return nil
	}
}

// testAwsJobResourceNewCluster returns the HCL for a job backed by a newly
// created AWS cluster; testAwsJobValuesNewCluster asserts against these values.
func testAwsJobResourceNewCluster() string {
	// The configuration is a constant, so return the literal directly instead
	// of passing it through fmt.Sprintf with no format arguments (S1039).
	return `
resource "databricks_job" "my_job" {
  new_cluster {
    autoscale {
      min_workers = 2
      max_workers = 3
    }
    spark_version = "6.4.x-scala2.11"
    aws_attributes {
      availability           = "SPOT"
      zone_id                = "us-east-1a"
      spot_bid_price_percent = "100"
      first_on_demand        = 1
      ebs_volume_type        = "GENERAL_PURPOSE_SSD"
      ebs_volume_count       = 1
      ebs_volume_size        = 32
    }
    node_type_id = "r3.xlarge"
  }
  notebook_path       = "/Users/jane.doe@databricks.com/my-demo-notebook"
  name                = "my-demo-notebook"
  timeout_seconds     = 3600
  max_retries         = 1
  max_concurrent_runs = 1
}
`
}

0 comments on commit 164ea0b

Please sign in to comment.