From 291b5d748650ae012c02cc4f902ffe1d059f6fcc Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 1 Jun 2020 07:35:02 -0400 Subject: [PATCH 01/10] created the getOrCreateToken function and it will get or create the token if there is a custom authorizing function configured on the client config object. Altered all the client objects to be using pointers to the client object rather than values of the client. --- client/service/apis.go | 32 ++++++++++++++++------------- client/service/client.go | 23 +++++++++++++++++++-- client/service/clusters.go | 2 +- client/service/commands.go | 2 +- client/service/dbfs.go | 2 +- client/service/groups.go | 2 +- client/service/instance_pools.go | 2 +- client/service/instance_profiles.go | 2 +- client/service/jobs.go | 2 +- client/service/libraries.go | 2 +- client/service/notebooks.go | 2 +- client/service/secret_acls.go | 2 +- client/service/secret_scopes.go | 2 +- client/service/secrets.go | 2 +- client/service/users.go | 2 +- 15 files changed, 52 insertions(+), 29 deletions(-) diff --git a/client/service/apis.go b/client/service/apis.go index f821ae0d6..ba408dc4c 100644 --- a/client/service/apis.go +++ b/client/service/apis.go @@ -17,22 +17,22 @@ func (c *DBApiClient) SetConfig(clientConfig *DBApiClientConfig) DBApiClient { } // Clusters returns an instance of ClustersAPI -func (c DBApiClient) Clusters() ClustersAPI { +func (c *DBApiClient) Clusters() ClustersAPI { return ClustersAPI{Client: c} } // Secrets returns an instance of SecretsAPI -func (c DBApiClient) Secrets() SecretsAPI { +func (c *DBApiClient) Secrets() SecretsAPI { return SecretsAPI{Client: c} } // SecretScopes returns an instance of SecretScopesAPI -func (c DBApiClient) SecretScopes() SecretScopesAPI { +func (c *DBApiClient) SecretScopes() SecretScopesAPI { return SecretScopesAPI{Client: c} } // SecretAcls returns an instance of SecretAclsAPI -func (c DBApiClient) SecretAcls() SecretAclsAPI { +func (c *DBApiClient) SecretAcls() SecretAclsAPI { return 
SecretAclsAPI{Client: c} } @@ -42,50 +42,54 @@ func (c *DBApiClient) Tokens() TokensAPI { } // Users returns an instance of UsersAPI -func (c DBApiClient) Users() UsersAPI { +func (c *DBApiClient) Users() UsersAPI { return UsersAPI{Client: c} } // Groups returns an instance of GroupsAPI -func (c DBApiClient) Groups() GroupsAPI { +func (c *DBApiClient) Groups() GroupsAPI { return GroupsAPI{Client: c} } // Notebooks returns an instance of NotebooksAPI -func (c DBApiClient) Notebooks() NotebooksAPI { +func (c *DBApiClient) Notebooks() NotebooksAPI { return NotebooksAPI{Client: c} } // Jobs returns an instance of JobsAPI -func (c DBApiClient) Jobs() JobsAPI { +func (c *DBApiClient) Jobs() JobsAPI { return JobsAPI{Client: c} } // DBFS returns an instance of DBFSAPI -func (c DBApiClient) DBFS() DBFSAPI { +func (c *DBApiClient) DBFS() DBFSAPI { return DBFSAPI{Client: c} } // Libraries returns an instance of LibrariesAPI -func (c DBApiClient) Libraries() LibrariesAPI { +func (c *DBApiClient) Libraries() LibrariesAPI { return LibrariesAPI{Client: c} } // InstancePools returns an instance of InstancePoolsAPI -func (c DBApiClient) InstancePools() InstancePoolsAPI { +func (c *DBApiClient) InstancePools() InstancePoolsAPI { return InstancePoolsAPI{Client: c} } // InstanceProfiles returns an instance of InstanceProfilesAPI -func (c DBApiClient) InstanceProfiles() InstanceProfilesAPI { +func (c *DBApiClient) InstanceProfiles() InstanceProfilesAPI { return InstanceProfilesAPI{Client: c} } // Commands returns an instance of CommandsAPI -func (c DBApiClient) Commands() CommandsAPI { +func (c *DBApiClient) Commands() CommandsAPI { return CommandsAPI{Client: c} } -func (c DBApiClient) performQuery(method, path string, apiVersion string, headers map[string]string, data interface{}, secretsMask *SecretsMask) ([]byte, error) { +func (c *DBApiClient) performQuery(method, path string, apiVersion string, headers map[string]string, data interface{}, secretsMask *SecretsMask) ([]byte, error) 
{ + err := c.Config.getOrCreateToken() + if err != nil { + return []byte(""), err + } return PerformQuery(c.Config, method, path, apiVersion, headers, true, false, data, secretsMask) } diff --git a/client/service/client.go b/client/service/client.go index f182ad915..4fe2d5f53 100644 --- a/client/service/client.go +++ b/client/service/client.go @@ -12,6 +12,7 @@ import ( "net/url" "reflect" "strings" + "sync" "time" "github.com/google/go-querystring/query" @@ -56,6 +57,8 @@ const ( BasicAuth AuthType = "BASIC" ) +var clientAuthorizerMutex sync.Mutex + // DBApiClientConfig is used to configure the DataBricks Client type DBApiClientConfig struct { Host string @@ -65,12 +68,14 @@ type DBApiClientConfig struct { DefaultHeaders map[string]string InsecureSkipVerify bool TimeoutSeconds int + CustomAuthorizer func(*DBApiClientConfig) error client *retryablehttp.Client } var transientErrorStringMatches []string = []string{ // TODO: Should we make these regexes to match more of the message or is this sufficient? "com.databricks.backend.manager.util.UnknownWorkerEnvironmentException", "does not have any associated worker environments", + "There is no worker environment with id", } // Setup initializes the client @@ -123,7 +128,6 @@ func checkHTTPRetry(ctx context.Context, resp *http.Response, err error) (bool, if err != nil { return false, err } - var errorBody DBApiErrorBody err = json.Unmarshal(body, &errorBody) if err != nil { @@ -145,7 +149,22 @@ func checkHTTPRetry(ctx context.Context, resp *http.Response, err error) (bool, return false, nil } -func (c *DBApiClientConfig) getAuthHeader() map[string]string { +func (c *DBApiClientConfig) getOrCreateToken() error { + if c.CustomAuthorizer != nil { + // Lock incase terraform tries to getOrCreateToken from multiple go routines on the same client ptr. 
+ clientAuthorizerMutex.Lock() + defer clientAuthorizerMutex.Unlock() + if reflect.ValueOf(c.Token).IsZero() { + log.Println("NOT AUTHORIZED SO ATTEMPTING TO AUTHORIZE") + return c.CustomAuthorizer(c) + } + log.Println("ALREADY AUTHORIZED") + return nil + } + return nil +} + +func (c DBApiClientConfig) getAuthHeader() map[string]string { auth := make(map[string]string) if c.AuthType == BasicAuth { auth["Authorization"] = "Basic " + c.Token diff --git a/client/service/clusters.go b/client/service/clusters.go index 53baf405f..d05cfc2ea 100644 --- a/client/service/clusters.go +++ b/client/service/clusters.go @@ -11,7 +11,7 @@ import ( // ClustersAPI is a struct that contains the Databricks api client to perform queries type ClustersAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates a new Spark cluster diff --git a/client/service/commands.go b/client/service/commands.go index 31b3d3110..75330c45e 100644 --- a/client/service/commands.go +++ b/client/service/commands.go @@ -12,7 +12,7 @@ import ( // CommandsAPI exposes the Context & Commands API type CommandsAPI struct { - Client DBApiClient + Client *DBApiClient } // Execute creates a spark context and executes a command and then closes context diff --git a/client/service/dbfs.go b/client/service/dbfs.go index 52794c508..13a7fa7ee 100644 --- a/client/service/dbfs.go +++ b/client/service/dbfs.go @@ -10,7 +10,7 @@ import ( // DBFSAPI exposes the DBFS API type DBFSAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates a file in DBFS given data string in base64 diff --git a/client/service/groups.go b/client/service/groups.go index 4f0e9981c..4cb747f9c 100644 --- a/client/service/groups.go +++ b/client/service/groups.go @@ -12,7 +12,7 @@ import ( // GroupsAPI exposes the scim groups API type GroupsAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates a scim group in the Databricks workspace diff --git a/client/service/instance_pools.go 
b/client/service/instance_pools.go index 7210e1068..2f6d9f1cf 100644 --- a/client/service/instance_pools.go +++ b/client/service/instance_pools.go @@ -8,7 +8,7 @@ import ( // InstancePoolsAPI exposes the instance pools api type InstancePoolsAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates the instance pool to given the instance pool configuration diff --git a/client/service/instance_profiles.go b/client/service/instance_profiles.go index d2afaa71e..e72d59834 100644 --- a/client/service/instance_profiles.go +++ b/client/service/instance_profiles.go @@ -9,7 +9,7 @@ import ( // InstanceProfilesAPI exposes the instance profiles api on the AWS deployment of Databricks type InstanceProfilesAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates an instance profile record on Databricks diff --git a/client/service/jobs.go b/client/service/jobs.go index c4fef0936..2391dc5c0 100644 --- a/client/service/jobs.go +++ b/client/service/jobs.go @@ -8,7 +8,7 @@ import ( // JobsAPI exposes the Jobs API type JobsAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates a job on the workspace given the job settings diff --git a/client/service/libraries.go b/client/service/libraries.go index 7c18e0fac..37d1aeb5e 100644 --- a/client/service/libraries.go +++ b/client/service/libraries.go @@ -8,7 +8,7 @@ import ( // LibrariesAPI exposes the Library API type LibrariesAPI struct { - Client DBApiClient + Client *DBApiClient } // Create installs the list of libraries given a cluster id diff --git a/client/service/notebooks.go b/client/service/notebooks.go index e178d0c45..9bf018ad1 100644 --- a/client/service/notebooks.go +++ b/client/service/notebooks.go @@ -8,7 +8,7 @@ import ( // NotebooksAPI exposes the Notebooks API type NotebooksAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates a notebook given the content and path diff --git a/client/service/secret_acls.go b/client/service/secret_acls.go index 
b8a3d4aa5..586d740b3 100644 --- a/client/service/secret_acls.go +++ b/client/service/secret_acls.go @@ -8,7 +8,7 @@ import ( // SecretAclsAPI exposes the Secret ACL API type SecretAclsAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates or overwrites the ACL associated with the given principal (user or group) on the specified scope point diff --git a/client/service/secret_scopes.go b/client/service/secret_scopes.go index f9fb8682d..26c66951a 100644 --- a/client/service/secret_scopes.go +++ b/client/service/secret_scopes.go @@ -9,7 +9,7 @@ import ( // SecretScopesAPI exposes the Secret Scopes API type SecretScopesAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates a new secret scope diff --git a/client/service/secrets.go b/client/service/secrets.go index a939111ef..b5cb2bcae 100644 --- a/client/service/secrets.go +++ b/client/service/secrets.go @@ -9,7 +9,7 @@ import ( // SecretsAPI exposes the Secrets API type SecretsAPI struct { - Client DBApiClient + Client *DBApiClient } // Create creates or modifies a string secret depends on the type of scope backend diff --git a/client/service/users.go b/client/service/users.go index e32b1c6eb..4ad575a47 100644 --- a/client/service/users.go +++ b/client/service/users.go @@ -14,7 +14,7 @@ import ( // UsersAPI exposes the scim user API type UsersAPI struct { - Client DBApiClient + Client *DBApiClient } // Create given a username, displayname, entitlements, and roles will create a scim user via SCIM api From 0f3a5d49dce7c0ea54008f862cc0f45f89e07f67 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 1 Jun 2020 07:39:11 -0400 Subject: [PATCH 02/10] added the customer auth function to authenticate in provider.go during the configuration process, and altered the other resources to refer to the pointer of the client object rather than the value --- databricks/azure_auth.go | 61 +++++++------------ databricks/azure_auth_test.go | 15 ++--- 
.../data_source_databricks_dbfs_file.go | 2 +- .../data_source_databricks_dbfs_file_paths.go | 2 +- ...ta_source_databricks_default_user_roles.go | 2 +- databricks/data_source_databricks_notebook.go | 2 +- .../data_source_databricks_notebook_paths.go | 2 +- databricks/data_source_databricks_zones.go | 2 +- databricks/mounts.go | 30 ++++----- databricks/provider.go | 12 +++- databricks/provider_test.go | 33 ++++++++++ .../resource_databricks_aws_s3_mount.go | 6 +- ...source_databricks_azure_adls_gen1_mount.go | 6 +- ...source_databricks_azure_adls_gen2_mount.go | 6 +- .../resource_databricks_azure_blob_mount.go | 6 +- ...source_databricks_azure_blob_mount_test.go | 6 +- databricks/resource_databricks_cluster.go | 8 +-- databricks/resource_databricks_dbfs_file.go | 6 +- .../resource_databricks_dbfs_file_sync.go | 10 +-- .../resource_databricks_instance_pool.go | 8 +-- .../resource_databricks_instance_profile.go | 6 +- databricks/resource_databricks_job.go | 8 +-- databricks/resource_databricks_notebook.go | 6 +- databricks/resource_databricks_scim_group.go | 8 +-- ...resource_databricks_scim_group_aws_test.go | 6 +- ...source_databricks_scim_group_azure_test.go | 6 +- databricks/resource_databricks_scim_user.go | 8 +-- .../resource_databricks_scim_user_aws_test.go | 8 +-- ...esource_databricks_scim_user_azure_test.go | 8 +-- databricks/resource_databricks_secret.go | 6 +- databricks/resource_databricks_secret_acl.go | 6 +- .../resource_databricks_secret_acl_test.go | 6 +- .../resource_databricks_secret_scope.go | 6 +- .../resource_databricks_secret_scope_test.go | 6 +- databricks/resource_databricks_secret_test.go | 8 +-- databricks/resource_databricks_token.go | 6 +- databricks/resource_databricks_token_test.go | 6 +- databricks/utils.go | 2 +- 38 files changed, 181 insertions(+), 160 deletions(-) diff --git a/databricks/azure_auth.go b/databricks/azure_auth.go index affa2bcf6..4b8685f71 100644 --- a/databricks/azure_auth.go +++ b/databricks/azure_auth.go @@ -3,13 
+3,12 @@ package databricks import ( "encoding/json" "fmt" - "log" - "net/http" - urlParse "net/url" - "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/databrickslabs/databricks-terraform/client/service" + "log" + "net/http" + urlParse "net/url" ) // List of management information @@ -78,27 +77,26 @@ func (a *AzureAuth) getWorkspaceID(config *service.DBApiClientConfig) error { log.Println("[DEBUG] Getting Workspace ID via management token.") // Escape all the ids url := fmt.Sprintf("https://management.azure.com/subscriptions/%s/resourceGroups/%s"+ - "/providers/Microsoft.Databricks/workspaces/%s?api-version=2018-04-01", + "/providers/Microsoft.Databricks/workspaces/%s", urlParse.PathEscape(a.TokenPayload.SubscriptionID), urlParse.PathEscape(a.TokenPayload.ResourceGroup), urlParse.PathEscape(a.TokenPayload.WorkspaceName)) - payload := &WorkspaceRequest{ - Properties: &WsProps{ManagedResourceGroupID: "/subscriptions/" + a.TokenPayload.SubscriptionID + "/resourceGroups/" + a.TokenPayload.ManagedResourceGroup}, - Name: a.TokenPayload.WorkspaceName, - Location: a.TokenPayload.AzureRegion, - } headers := map[string]string{ "Content-Type": "application/json", "cache-control": "no-cache", "Authorization": "Bearer " + a.ManagementToken, } - + type apiVersion struct { + ApiVersion string `url:"api-version"` + } + uriPayload := apiVersion{ + ApiVersion: "2018-04-01", + } var responseMap map[string]interface{} - resp, err := service.PerformQuery(config, http.MethodPut, url, "2.0", headers, true, true, payload, nil) + resp, err := service.PerformQuery(config, http.MethodGet, url, "2.0", headers, false, true, uriPayload, nil) if err != nil { return err } - err = json.Unmarshal(resp, &responseMap) if err != nil { return err @@ -170,53 +168,36 @@ func (a *AzureAuth) getWorkspaceAccessToken(config *service.DBApiClientConfig) e // 2. Get Workspace ID // 3. 
Get Azure Databricks Platform OAuth Token using Databricks resource id // 4. Get Azure Databricks Workspace Personal Access Token for the SP (60 min duration) -func (a *AzureAuth) initWorkspaceAndGetClient(config *service.DBApiClientConfig) (service.DBApiClient, error) { - var dbClient service.DBApiClient +func (a *AzureAuth) initWorkspaceAndGetClient(config *service.DBApiClientConfig) error { + //var dbClient service.DBApiClient // Get management token err := a.getManagementToken(config) if err != nil { - return dbClient, err + return err } // Get workspace access token err = a.getWorkspaceID(config) if err != nil { - return dbClient, err + return err } // Get platform token err = a.getADBPlatformToken(config) if err != nil { - return dbClient, err + return err } // Get workspace personal access token err = a.getWorkspaceAccessToken(config) if err != nil { - return dbClient, err + return err } - var newOption service.DBApiClientConfig - - // TODO: Eventually change this to include new Databricks domain names. May have to add new vars and/or deprecate existing args. - newOption.Host = "https://" + a.TokenPayload.AzureRegion + ".azuredatabricks.net" - newOption.Token = a.AdbAccessToken + //// TODO: Eventually change this to include new Databricks domain names. May have to add new vars and/or deprecate existing args. + config.Host = "https://" + a.TokenPayload.AzureRegion + ".azuredatabricks.net" + config.Token = a.AdbAccessToken - // Headers to use aad tokens, hidden till tokens support secrets, scopes and acls - //newOption.DefaultHeaders = map[string]string{ - // //"Content-Type": "application/x-www-form-urlencoded", - // "X-Databricks-Azure-Workspace-Resource-Id": a.AdbWorkspaceResourceID, - // "X-Databricks-Azure-SP-Management-Token": a.ManagementToken, - // "cache-control": "no-cache", - //} - dbClient.SetConfig(&newOption) - - // Spin for a while while the workspace comes up and starts behaving. 
- _, err = dbClient.Clusters().ListNodeTypes() - if err != nil { - return dbClient, err - } - - return dbClient, err + return nil } diff --git a/databricks/azure_auth_test.go b/databricks/azure_auth_test.go index 160871302..11858b668 100644 --- a/databricks/azure_auth_test.go +++ b/databricks/azure_auth_test.go @@ -22,10 +22,10 @@ func TestAzureAuthCreateApiToken(t *testing.T) { azureAuth := AzureAuth{ TokenPayload: &TokenPayload{ - ManagedResourceGroup: os.Getenv("TEST_MANAGED_RESOURCE_GROUP"), - AzureRegion: "centralus", - WorkspaceName: os.Getenv("TEST_WORKSPACE_NAME"), - ResourceGroup: os.Getenv("TEST_RESOURCE_GROUP"), + ManagedResourceGroup: os.Getenv("DATABRICKS_AZURE_MANAGED_RESOURCE_GROUP"), + AzureRegion: os.Getenv("AZURE_REGION"), + WorkspaceName: os.Getenv("DATABRICKS_AZURE_WORKSPACE_NAME"), + ResourceGroup: os.Getenv("DATABRICKS_AZURE_RESOURCE_GROUP"), }, ManagementToken: "", AdbWorkspaceResourceID: "", @@ -36,10 +36,11 @@ func TestAzureAuthCreateApiToken(t *testing.T) { azureAuth.TokenPayload.TenantID = os.Getenv("DATABRICKS_AZURE_TENANT_ID") azureAuth.TokenPayload.ClientID = os.Getenv("DATABRICKS_AZURE_CLIENT_ID") azureAuth.TokenPayload.ClientSecret = os.Getenv("DATABRICKS_AZURE_CLIENT_SECRET") - option := GetIntegrationDBClientOptions() - api, err := azureAuth.initWorkspaceAndGetClient(option) + config := GetIntegrationDBClientOptions() + err := azureAuth.initWorkspaceAndGetClient(config) assert.NoError(t, err, err) - + api := service.DBApiClient{} + api.SetConfig(config) instancePoolInfo, instancePoolErr := api.InstancePools().Create(model.InstancePool{ InstancePoolName: "my_instance_pool", MinIdleInstances: 0, diff --git a/databricks/data_source_databricks_dbfs_file.go b/databricks/data_source_databricks_dbfs_file.go index 4748e93c8..8c6ef6bd9 100644 --- a/databricks/data_source_databricks_dbfs_file.go +++ b/databricks/data_source_databricks_dbfs_file.go @@ -35,7 +35,7 @@ func dataSourceDBFSFile() *schema.Resource { func dataSourceDBFSFileRead(d 
*schema.ResourceData, m interface{}) error { path := d.Get("path").(string) limitFileSize := d.Get("limit_file_size").(bool) - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) fileInfo, err := client.DBFS().Status(path) if err != nil { diff --git a/databricks/data_source_databricks_dbfs_file_paths.go b/databricks/data_source_databricks_dbfs_file_paths.go index badf68e75..70738ff26 100644 --- a/databricks/data_source_databricks_dbfs_file_paths.go +++ b/databricks/data_source_databricks_dbfs_file_paths.go @@ -43,7 +43,7 @@ func dataSourceDBFSFilePaths() *schema.Resource { func dataSourceDBFSFilePathsRead(d *schema.ResourceData, m interface{}) error { path := d.Get("path").(string) recursive := d.Get("recursive").(bool) - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) paths, err := client.DBFS().List(path, recursive) if err != nil { diff --git a/databricks/data_source_databricks_default_user_roles.go b/databricks/data_source_databricks_default_user_roles.go index cb5cbe8fe..059def6eb 100644 --- a/databricks/data_source_databricks_default_user_roles.go +++ b/databricks/data_source_databricks_default_user_roles.go @@ -8,7 +8,7 @@ import ( func dataSourceDefaultUserRoles() *schema.Resource { return &schema.Resource{ Read: func(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) defaultRolesUserName := d.Get("default_username").(string) metaUser, err := client.Users().GetOrCreateDefaultMetaUser(defaultRolesUserName, defaultRolesUserName, true) diff --git a/databricks/data_source_databricks_notebook.go b/databricks/data_source_databricks_notebook.go index 848333d82..33115e895 100644 --- a/databricks/data_source_databricks_notebook.go +++ b/databricks/data_source_databricks_notebook.go @@ -50,7 +50,7 @@ func dataSourceNotebook() *schema.Resource { func dataSourceNotebookRead(d *schema.ResourceData, m interface{}) error { path := d.Get("path").(string) format := 
d.Get("format").(string) - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) notebookInfo, err := client.Notebooks().Read(path) if err != nil { diff --git a/databricks/data_source_databricks_notebook_paths.go b/databricks/data_source_databricks_notebook_paths.go index 36b9cbfbb..5f18ea3a2 100644 --- a/databricks/data_source_databricks_notebook_paths.go +++ b/databricks/data_source_databricks_notebook_paths.go @@ -47,7 +47,7 @@ func dataSourceNotebookPathsRead(d *schema.ResourceData, m interface{}) error { path := d.Get("path").(string) recursive := d.Get("recursive").(bool) - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) notebookList, err := client.Notebooks().List(path, recursive) if err != nil { diff --git a/databricks/data_source_databricks_zones.go b/databricks/data_source_databricks_zones.go index 16ea4427a..cf26ecb2f 100644 --- a/databricks/data_source_databricks_zones.go +++ b/databricks/data_source_databricks_zones.go @@ -8,7 +8,7 @@ import ( func dataSourceClusterZones() *schema.Resource { return &schema.Resource{ Read: func(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) zonesInfo, err := client.Clusters().ListZones() if err != nil { diff --git a/databricks/mounts.go b/databricks/mounts.go index c50d66f62..32c9fddd0 100644 --- a/databricks/mounts.go +++ b/databricks/mounts.go @@ -11,9 +11,9 @@ import ( // Mount interface describes the functionality of any mount which is create, read and delete type Mount interface { - Create(client service.DBApiClient, clusterID string) error - Delete(client service.DBApiClient, clusterID string) error - Read(client service.DBApiClient, clusterID string) (string, error) + Create(client *service.DBApiClient, clusterID string) error + Delete(client *service.DBApiClient, clusterID string) error + Read(client *service.DBApiClient, clusterID string) (string, error) } // AWSIamMount describes the object for a 
aws mount using iam role @@ -29,7 +29,7 @@ func NewAWSIamMount(s3BucketName string, mountName string) *AWSIamMount { } // Create creates an aws iam mount given a cluster ID -func (m AWSIamMount) Create(client service.DBApiClient, clusterID string) error { +func (m AWSIamMount) Create(client *service.DBApiClient, clusterID string) error { iamMountCommand := fmt.Sprintf(` dbutils.fs.mount("s3a://%s", "/mnt/%s") dbutils.fs.ls("/mnt/%s") @@ -47,7 +47,7 @@ dbutils.notebook.exit("success") } // Delete deletes an aws iam mount given a cluster ID -func (m AWSIamMount) Delete(client service.DBApiClient, clusterID string) error { +func (m AWSIamMount) Delete(client *service.DBApiClient, clusterID string) error { iamMountCommand := fmt.Sprintf(` dbutils.fs.unmount("/mnt/%s") dbutils.fs.refreshMounts() @@ -65,7 +65,7 @@ dbutils.notebook.exit("success") } // Read verifies an aws iam mount given a cluster ID -func (m AWSIamMount) Read(client service.DBApiClient, clusterID string) (string, error) { +func (m AWSIamMount) Read(client *service.DBApiClient, clusterID string) (string, error) { iamMountCommand := fmt.Sprintf(` dbutils.fs.ls("/mnt/%s") for mount in dbutils.fs.mounts(): @@ -108,7 +108,7 @@ func NewAzureBlobMount(containerName string, storageAccountName string, director } // Create creates a azure blob storage mount given a cluster id -func (m AzureBlobMount) Create(client service.DBApiClient, clusterID string) error { +func (m AzureBlobMount) Create(client *service.DBApiClient, clusterID string) error { var confKey string if m.AuthType == "SAS" { @@ -139,7 +139,7 @@ dbutils.notebook.exit("success") } // Delete deletes a azure blob storage mount given a cluster id -func (m AzureBlobMount) Delete(client service.DBApiClient, clusterID string) error { +func (m AzureBlobMount) Delete(client *service.DBApiClient, clusterID string) error { iamMountCommand := fmt.Sprintf(` dbutils.fs.unmount("/mnt/%s") dbutils.fs.refreshMounts() @@ -157,7 +157,7 @@ 
dbutils.notebook.exit("success") } // Read verifies a azure blob storage mount given a cluster id -func (m AzureBlobMount) Read(client service.DBApiClient, clusterID string) (string, error) { +func (m AzureBlobMount) Read(client *service.DBApiClient, clusterID string) (string, error) { iamMountCommand := fmt.Sprintf(` dbutils.fs.ls("/mnt/%s") for mount in dbutils.fs.mounts(): @@ -208,7 +208,7 @@ func NewAzureADLSGen1Mount(storageResource string, directory string, mountName s } // Create creates a azure datalake gen 1 storage mount given a cluster id -func (m AzureADLSGen1Mount) Create(client service.DBApiClient, clusterID string) error { +func (m AzureADLSGen1Mount) Create(client *service.DBApiClient, clusterID string) error { iamMountCommand := fmt.Sprintf(` try: configs = {"%s.oauth2.access.token.provider.type": "ClientCredential", @@ -237,7 +237,7 @@ dbutils.notebook.exit("success") } // Delete deletes a azure datalake gen 1 storage mount given a cluster id -func (m AzureADLSGen1Mount) Delete(client service.DBApiClient, clusterID string) error { +func (m AzureADLSGen1Mount) Delete(client *service.DBApiClient, clusterID string) error { iamMountCommand := fmt.Sprintf(` dbutils.fs.unmount("/mnt/%s") dbutils.fs.refreshMounts() @@ -255,7 +255,7 @@ dbutils.notebook.exit("success") } // Read verifies the azure datalake gen 1 storage mount given a cluster id -func (m AzureADLSGen1Mount) Read(client service.DBApiClient, clusterID string) (string, error) { +func (m AzureADLSGen1Mount) Read(client *service.DBApiClient, clusterID string) (string, error) { iamMountCommand := fmt.Sprintf(` dbutils.fs.ls("/mnt/%s") for mount in dbutils.fs.mounts(): @@ -306,7 +306,7 @@ func NewAzureADLSGen2Mount(containerName string, storageAccountName string, dire } // Create creates a azure datalake gen 2 storage mount -func (m AzureADLSGen2Mount) Create(client service.DBApiClient, clusterID string) error { +func (m AzureADLSGen2Mount) Create(client *service.DBApiClient, clusterID string) 
error { iamMountCommand := fmt.Sprintf(` try: configs = {"fs.azure.account.auth.type": "OAuth", @@ -339,7 +339,7 @@ dbutils.notebook.exit("success") } // Delete deletes a azure datalake gen 2 storage mount -func (m AzureADLSGen2Mount) Delete(client service.DBApiClient, clusterID string) error { +func (m AzureADLSGen2Mount) Delete(client *service.DBApiClient, clusterID string) error { iamMountCommand := fmt.Sprintf(` dbutils.fs.unmount("/mnt/%s") dbutils.fs.refreshMounts() @@ -357,7 +357,7 @@ dbutils.notebook.exit("success") } // Read verifies the azure datalake gen 2 storage mount -func (m AzureADLSGen2Mount) Read(client service.DBApiClient, clusterID string) (string, error) { +func (m AzureADLSGen2Mount) Read(client *service.DBApiClient, clusterID string) (string, error) { iamMountCommand := fmt.Sprintf(` dbutils.fs.ls("/mnt/%s") for mount in dbutils.fs.mounts(): diff --git a/databricks/provider.go b/databricks/provider.go index 855daad56..5829f8f22 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -187,8 +187,14 @@ func providerConfigureAzureClient(d *schema.ResourceData, providerVersion string AdbAccessToken: "", AdbPlatformToken: "", } - log.Println("Running Azure Auth") - return azureAuthSetup.initWorkspaceAndGetClient(config) + + // Setup the CustomAuthorizer Function to be called at API invoke rather than client invoke + config.CustomAuthorizer = func(config *service.DBApiClientConfig) error { + return azureAuthSetup.initWorkspaceAndGetClient(config) + } + var dbClient service.DBApiClient + dbClient.SetConfig(config) + return &dbClient, nil } func providerConfigure(d *schema.ResourceData, providerVersion string) (interface{}, error) { @@ -214,5 +220,5 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac config.UserAgent = fmt.Sprintf("databricks-tf-provider-%s", providerVersion) var dbClient service.DBApiClient dbClient.SetConfig(&config) - return dbClient, nil + return &dbClient, nil } diff --git 
a/databricks/provider_test.go b/databricks/provider_test.go index 43d5650bb..011d6ebc9 100644 --- a/databricks/provider_test.go +++ b/databricks/provider_test.go @@ -2,6 +2,7 @@ package databricks import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "log" "os" "testing" @@ -43,3 +44,35 @@ func TestMain(m *testing.M) { code := m.Run() os.Exit(code) } + +func TestAccProviderConfigureAzureSPAuth(t *testing.T) { + resource.Test(t, + resource.TestCase{ + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + PlanOnly: true, + Config: testInitialEmptyWorkspaceClusterDeployment(), + ExpectNonEmptyPlan: true, + }, + }, + }, + ) +} + +func testInitialEmptyWorkspaceClusterDeployment() string { + return ` +provider "databricks" { + azure_auth = { + managed_resource_group = "azurerm_databricks_workspace.demo.managed_resource_group_name" + azure_region = "westus" + workspace_name = "azurerm_databricks_workspace.demo.name" + resource_group = "azurerm_databricks_workspace.demo.resource_group_name" + } +} + +resource "databricks_scim_group" "my-group-azure3" { + display_name = "Test terraform Group3" +} +` +} diff --git a/databricks/resource_databricks_aws_s3_mount.go b/databricks/resource_databricks_aws_s3_mount.go index fd330ac62..f7361d532 100644 --- a/databricks/resource_databricks_aws_s3_mount.go +++ b/databricks/resource_databricks_aws_s3_mount.go @@ -32,7 +32,7 @@ func resourceAWSS3Mount() *schema.Resource { } func resourceAWSS3Create(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -63,7 +63,7 @@ func resourceAWSS3Create(d *schema.ResourceData, m interface{}) error { } func resourceAWSS3Read(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err 
:= changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -84,7 +84,7 @@ func resourceAWSS3Read(d *schema.ResourceData, m interface{}) error { } func resourceAWSS3Delete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { diff --git a/databricks/resource_databricks_azure_adls_gen1_mount.go b/databricks/resource_databricks_azure_adls_gen1_mount.go index 15342fe29..856d675a8 100644 --- a/databricks/resource_databricks_azure_adls_gen1_mount.go +++ b/databricks/resource_databricks_azure_adls_gen1_mount.go @@ -77,7 +77,7 @@ func resourceAzureAdlsGen1Mount() *schema.Resource { } func resourceAzureAdlsGen1Create(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -133,7 +133,7 @@ func resourceAzureAdlsGen1Create(d *schema.ResourceData, m interface{}) error { return resourceAzureAdlsGen1Read(d, m) } func resourceAzureAdlsGen1Read(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -174,7 +174,7 @@ func resourceAzureAdlsGen1Read(d *schema.ResourceData, m interface{}) error { } func resourceAzureAdlsGen1Delete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { diff --git a/databricks/resource_databricks_azure_adls_gen2_mount.go b/databricks/resource_databricks_azure_adls_gen2_mount.go index 9b95f5f08..abfd130ad 100644 --- 
a/databricks/resource_databricks_azure_adls_gen2_mount.go +++ b/databricks/resource_databricks_azure_adls_gen2_mount.go @@ -70,7 +70,7 @@ func resourceAzureAdlsGen2Mount() *schema.Resource { } func resourceAzureAdlsGen2Create(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -127,7 +127,7 @@ func resourceAzureAdlsGen2Create(d *schema.ResourceData, m interface{}) error { return resourceAzureAdlsGen2Read(d, m) } func resourceAzureAdlsGen2Read(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -173,7 +173,7 @@ func resourceAzureAdlsGen2Read(d *schema.ResourceData, m interface{}) error { } func resourceAzureAdlsGen2Delete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { diff --git a/databricks/resource_databricks_azure_blob_mount.go b/databricks/resource_databricks_azure_blob_mount.go index 56a7bdeaf..a2c62ebe3 100644 --- a/databricks/resource_databricks_azure_blob_mount.go +++ b/databricks/resource_databricks_azure_blob_mount.go @@ -71,7 +71,7 @@ func resourceAzureBlobMount() *schema.Resource { } func resourceAzureBlobMountCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -119,7 +119,7 @@ func resourceAzureBlobMountCreate(d *schema.ResourceData, m interface{}) error { return resourceAzureBlobMountRead(d, m) } func 
resourceAzureBlobMountRead(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { @@ -163,7 +163,7 @@ func resourceAzureBlobMountRead(d *schema.ResourceData, m interface{}) error { } func resourceAzureBlobMountDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) clusterID := d.Get("cluster_id").(string) err := changeClusterIntoRunningState(clusterID, client) if err != nil { diff --git a/databricks/resource_databricks_azure_blob_mount_test.go b/databricks/resource_databricks_azure_blob_mount_test.go index b4523a052..afcf66389 100644 --- a/databricks/resource_databricks_azure_blob_mount_test.go +++ b/databricks/resource_databricks_azure_blob_mount_test.go @@ -29,7 +29,7 @@ func TestAccAzureBlobMount_correctly_mounts(t *testing.T) { }, { PreConfig: func() { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) err := azureBlobMount.Delete(client, clusterInfo.ClusterID) assert.NoError(t, err, "TestAccAzureBlobMount_correctly_mounts: Failed to remove the mount.") }, @@ -51,7 +51,7 @@ func testAccAzureBlobMount_cluster_exists(n string, clusterInfo *model.ClusterIn } // retrieve the configured client from the test setup - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) resp, err := client.Clusters().Get(rs.Primary.ID) if err != nil { return err @@ -82,7 +82,7 @@ func testAccAzureBlobMount_mount_exists(n string, azureBlobMount *AzureBlobMount blobMount := NewAzureBlobMount(containerName, storageAccountName, directory, mountName, authType, tokenSecretScope, tokenSecretKey) - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) cluster_id := 
clusterInfo.ClusterID message, err := blobMount.Read(client, cluster_id) diff --git a/databricks/resource_databricks_cluster.go b/databricks/resource_databricks_cluster.go index ffc0b57e0..c37e2b67e 100644 --- a/databricks/resource_databricks_cluster.go +++ b/databricks/resource_databricks_cluster.go @@ -501,7 +501,7 @@ func convertListInterfaceToString(m []interface{}) []string { func resourceClusterCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) cluster := parseSchemaToCluster(d, "") libraries := parseSchemaToClusterLibraries(d) @@ -536,7 +536,7 @@ func resourceClusterCreate(d *schema.ResourceData, m interface{}) error { } func resourceClusterRead(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() clusterInfo, err := client.Clusters().Get(id) @@ -1039,7 +1039,7 @@ func parseSchemaToClusterLibraries(d *schema.ResourceData) []model.Library { } func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() clusterInfo, err := client.Clusters().Get(id) if err != nil { @@ -1161,7 +1161,7 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { func resourceClusterDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.Clusters().PermanentDelete(id) if err != nil { diff --git a/databricks/resource_databricks_dbfs_file.go b/databricks/resource_databricks_dbfs_file.go index 2d3311b69..1aa442b86 100644 --- a/databricks/resource_databricks_dbfs_file.go +++ b/databricks/resource_databricks_dbfs_file.go @@ -50,7 +50,7 @@ func resourceDBFSFile() *schema.Resource { } func resourceDBFSFileCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := 
m.(*service.DBApiClient) path := d.Get("path").(string) content := d.Get("content").(string) overwrite := d.Get("overwrite").(bool) @@ -88,7 +88,7 @@ func resourceDBFSFileCreate(d *schema.ResourceData, m interface{}) error { func resourceDBFSFileRead(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) fileInfo, err := client.DBFS().Status(id) if err != nil { @@ -139,7 +139,7 @@ func resourceDBFSFileUpdate(d *schema.ResourceData, m interface{}) error { } func resourceDBFSFileDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.DBFS().Delete(id, false) return err } diff --git a/databricks/resource_databricks_dbfs_file_sync.go b/databricks/resource_databricks_dbfs_file_sync.go index 89244098f..094012819 100644 --- a/databricks/resource_databricks_dbfs_file_sync.go +++ b/databricks/resource_databricks_dbfs_file_sync.go @@ -49,7 +49,7 @@ func resourceDBFSFileSync() *schema.Resource { } func resourceDBFSFileSyncCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) srcPath := d.Get("src_path").(string) tgtPath := d.Get("tgt_path").(string) mkdirs := d.Get("mkdirs").(bool) @@ -62,7 +62,7 @@ func resourceDBFSFileSyncCreate(d *schema.ResourceData, m interface{}) error { } } - apiClient := parseSchemaToDBAPIClient(d, &client) + apiClient := parseSchemaToDBAPIClient(d, client) err := client.DBFS().Copy(srcPath, tgtPath, apiClient, true) if err != nil { return err @@ -96,12 +96,12 @@ func resourceDBFSFileSyncUpdate(d *schema.ResourceData, m interface{}) error { } func resourceDBFSFileSyncRead(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) srcPath := d.Get("src_path").(string) tgtPath := d.Get("tgt_path").(string) var srcAPIDBFSClient service.DBFSAPI - 
srcAPICLient := parseSchemaToDBAPIClient(d, &client) + srcAPICLient := parseSchemaToDBAPIClient(d, client) if srcAPICLient != nil { srcAPIDBFSClient = srcAPICLient.DBFS() } else { @@ -125,7 +125,7 @@ func resourceDBFSFileSyncRead(d *schema.ResourceData, m interface{}) error { } func resourceDBFSFileSyncDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := strings.Split(d.Id(), "|||")[1] err := client.DBFS().Delete(id, false) diff --git a/databricks/resource_databricks_instance_pool.go b/databricks/resource_databricks_instance_pool.go index 681b1b208..822e33a21 100644 --- a/databricks/resource_databricks_instance_pool.go +++ b/databricks/resource_databricks_instance_pool.go @@ -160,7 +160,7 @@ func convertMapStringInterfaceToStringString(m map[string]interface{}) map[strin } func resourceInstancePoolCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) var instancePool model.InstancePool var instancePoolAwsAttributes model.InstancePoolAwsAttributes @@ -235,7 +235,7 @@ func resourceInstancePoolCreate(d *schema.ResourceData, m interface{}) error { func resourceInstancePoolRead(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) instancePoolInfo, err := client.InstancePools().Read(id) if err != nil { if isInstancePoolMissing(err.Error(), id) { @@ -334,7 +334,7 @@ func resourceInstancePoolRead(d *schema.ResourceData, m interface{}) error { func resourceInstancePoolUpdate(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) var instancePoolInfo model.InstancePoolInfo instancePoolInfo.InstancePoolName = d.Get("instance_pool_name").(string) @@ -352,7 +352,7 @@ func resourceInstancePoolUpdate(d *schema.ResourceData, m interface{}) error { } func 
resourceInstancePoolDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() err := client.InstancePools().Delete(id) return err diff --git a/databricks/resource_databricks_instance_profile.go b/databricks/resource_databricks_instance_profile.go index a96dea006..6cae646a6 100644 --- a/databricks/resource_databricks_instance_profile.go +++ b/databricks/resource_databricks_instance_profile.go @@ -30,7 +30,7 @@ func resourceInstanceProfile() *schema.Resource { } func resourceInstanceProfileCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) instanceProfileArn := d.Get("instance_profile_arn").(string) skipValidation := d.Get("skip_validation").(bool) err := client.InstanceProfiles().Create(instanceProfileArn, skipValidation) @@ -48,7 +48,7 @@ func resourceInstanceProfileCreate(d *schema.ResourceData, m interface{}) error func resourceInstanceProfileRead(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) profile, err := client.InstanceProfiles().Read(id) if err != nil { if isInstanceProfileMissing(err.Error(), id) { @@ -64,7 +64,7 @@ func resourceInstanceProfileRead(d *schema.ResourceData, m interface{}) error { func resourceInstanceProfileDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.InstanceProfiles().Delete(id) return err } diff --git a/databricks/resource_databricks_job.go b/databricks/resource_databricks_job.go index ad3bb0dc1..08bc41c3f 100644 --- a/databricks/resource_databricks_job.go +++ b/databricks/resource_databricks_job.go @@ -513,7 +513,7 @@ func resourceJob() *schema.Resource { func resourceJobCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := 
m.(*service.DBApiClient) jobSettings := parseSchemaToJobSettings(d) job, err := client.Jobs().Create(jobSettings) @@ -526,7 +526,7 @@ func resourceJobCreate(d *schema.ResourceData, m interface{}) error { } func resourceJobRead(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() idInt, err := strconv.ParseInt(id, 10, 32) if err != nil { @@ -947,7 +947,7 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { } func resourceJobUpdate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() idInt, err := strconv.ParseInt(id, 10, 32) if err != nil { @@ -963,7 +963,7 @@ func resourceJobUpdate(d *schema.ResourceData, m interface{}) error { } func resourceJobDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() idInt, err := strconv.ParseInt(id, 10, 32) if err != nil { diff --git a/databricks/resource_databricks_notebook.go b/databricks/resource_databricks_notebook.go index eb7586c05..f750756b2 100644 --- a/databricks/resource_databricks_notebook.go +++ b/databricks/resource_databricks_notebook.go @@ -94,7 +94,7 @@ func resourceNotebook() *schema.Resource { } func resourceNotebookCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) path := d.Get("path").(string) content := d.Get("content").(string) language := d.Get("language").(string) @@ -136,7 +136,7 @@ func resourceNotebookCreate(d *schema.ResourceData, m interface{}) error { func resourceNotebookRead(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) format := d.Get("format").(string) notebookData, err := client.Notebooks().Export(id, model.ExportFormat(format)) if err != nil { @@ -179,7 +179,7 @@ func 
resourceNotebookRead(d *schema.ResourceData, m interface{}) error { func resourceNotebookDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.Notebooks().Delete(id, true) return err } diff --git a/databricks/resource_databricks_scim_group.go b/databricks/resource_databricks_scim_group.go index ad2473168..9ad506d82 100644 --- a/databricks/resource_databricks_scim_group.go +++ b/databricks/resource_databricks_scim_group.go @@ -74,7 +74,7 @@ func resourceScimGroup() *schema.Resource { //} func resourceScimGroupCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) groupName := d.Get("display_name").(string) var members []string @@ -121,7 +121,7 @@ func getListOfEntitlements(entitlementList []model.EntitlementsListItem) []strin func resourceScimGroupRead(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) group, err := client.Groups().Read(id) if err != nil { if isScimGroupMissing(err.Error(), id) { @@ -177,7 +177,7 @@ func diff(sliceA []string, sliceB []string) []string { func resourceScimGroupUpdate(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) group, err := client.Groups().Read(id) if err != nil { @@ -243,7 +243,7 @@ func resourceScimGroupUpdate(d *schema.ResourceData, m interface{}) error { func resourceScimGroupDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.Groups().Delete(id) return err } diff --git a/databricks/resource_databricks_scim_group_aws_test.go b/databricks/resource_databricks_scim_group_aws_test.go index 79e15e637..10e46ca63 100644 --- a/databricks/resource_databricks_scim_group_aws_test.go +++ 
b/databricks/resource_databricks_scim_group_aws_test.go @@ -89,7 +89,7 @@ func TestAccScimGroupResource(t *testing.T) { }, { PreConfig: func() { - err := testAccProvider.Meta().(service.DBApiClient).Groups().Delete(ScimGroup.ID) + err := testAccProvider.Meta().(*service.DBApiClient).Groups().Delete(ScimGroup.ID) assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above @@ -131,7 +131,7 @@ func TestAccScimGroupResource(t *testing.T) { } func testScimGroupResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_scim_group" { continue @@ -165,7 +165,7 @@ func testScimGroupResourceExists(n string, group *model.Group, t *testing.T) res } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.Groups().Read(rs.Primary.ID) if err != nil { return err diff --git a/databricks/resource_databricks_scim_group_azure_test.go b/databricks/resource_databricks_scim_group_azure_test.go index 343e27410..7603ad639 100644 --- a/databricks/resource_databricks_scim_group_azure_test.go +++ b/databricks/resource_databricks_scim_group_azure_test.go @@ -83,7 +83,7 @@ func TestAccScimGroupResource(t *testing.T) { }, { PreConfig: func() { - err := testAccProvider.Meta().(service.DBApiClient).Groups().Delete(ScimGroup.ID) + err := testAccProvider.Meta().(*service.DBApiClient).Groups().Delete(ScimGroup.ID) assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above @@ -122,7 +122,7 @@ func TestAccScimGroupResource(t *testing.T) { } func testScimGroupResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range 
s.RootModule().Resources { if rs.Type != "databricks_scim_group" { continue @@ -155,7 +155,7 @@ func testScimGroupResourceExists(n string, group *model.Group, t *testing.T) res } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.Groups().Read(rs.Primary.ID) if err != nil { return err diff --git a/databricks/resource_databricks_scim_user.go b/databricks/resource_databricks_scim_user.go index 4250f0f84..3d718153d 100644 --- a/databricks/resource_databricks_scim_user.go +++ b/databricks/resource_databricks_scim_user.go @@ -73,7 +73,7 @@ func convertInterfaceSliceToStringSlice(input []interface{}) []string { } func resourceScimUserCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) userName := d.Get("user_name").(string) setAdmin := d.Get("set_admin").(bool) var displayName string @@ -127,7 +127,7 @@ func getListOfRoles(roleList []model.RoleListItem) []string { func resourceScimUserRead(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) user, err := client.Users().Read(id) if err != nil { if isScimUserMissing(err.Error(), id) { @@ -200,7 +200,7 @@ func resourceScimUserRead(d *schema.ResourceData, m interface{}) error { func resourceScimUserUpdate(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) userName := d.Get("user_name").(string) var displayName string var roles []string @@ -246,7 +246,7 @@ func resourceScimUserUpdate(d *schema.ResourceData, m interface{}) error { func resourceScimUserDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.Users().Delete(id) return err } diff --git 
a/databricks/resource_databricks_scim_user_aws_test.go b/databricks/resource_databricks_scim_user_aws_test.go index 8addd17c2..425ca6417 100644 --- a/databricks/resource_databricks_scim_user_aws_test.go +++ b/databricks/resource_databricks_scim_user_aws_test.go @@ -83,7 +83,7 @@ func TestAccScimUserResource(t *testing.T) { }, { PreConfig: func() { - err := testAccProvider.Meta().(service.DBApiClient).Users().Delete(scimUser.ID) + err := testAccProvider.Meta().(*service.DBApiClient).Users().Delete(scimUser.ID) assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above @@ -104,7 +104,7 @@ func TestAccScimUserResource(t *testing.T) { { //Create a new user PreConfig: func() { - err := testAccProvider.Meta().(service.DBApiClient).Users().Delete(scimUser.ID) + err := testAccProvider.Meta().(*service.DBApiClient).Users().Delete(scimUser.ID) assert.NoError(t, err, err) }, // Create new admin user @@ -162,7 +162,7 @@ func TestAccScimUserResource(t *testing.T) { } func testScimUserResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_scim_user" { continue @@ -195,7 +195,7 @@ func testScimUserResourceExists(n string, user *model.User, t *testing.T) resour } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.Users().Read(rs.Primary.ID) if err != nil { return err diff --git a/databricks/resource_databricks_scim_user_azure_test.go b/databricks/resource_databricks_scim_user_azure_test.go index 8a07731ba..8dafea399 100644 --- a/databricks/resource_databricks_scim_user_azure_test.go +++ b/databricks/resource_databricks_scim_user_azure_test.go @@ -85,7 +85,7 @@ func TestAccScimUserResource(t *testing.T) { }, { PreConfig: func() { - err := 
testAccProvider.Meta().(service.DBApiClient).Users().Delete(scimUser.ID) + err := testAccProvider.Meta().(*service.DBApiClient).Users().Delete(scimUser.ID) assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above @@ -106,7 +106,7 @@ func TestAccScimUserResource(t *testing.T) { { //Create a new user PreConfig: func() { - err := testAccProvider.Meta().(service.DBApiClient).Users().Delete(scimUser.ID) + err := testAccProvider.Meta().(*service.DBApiClient).Users().Delete(scimUser.ID) assert.NoError(t, err, err) }, // Create new admin user @@ -164,7 +164,7 @@ func TestAccScimUserResource(t *testing.T) { } func testScimUserResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_scim_user" { continue @@ -215,7 +215,7 @@ func testScimUserResourceExists(n string, user *model.User, t *testing.T) resour } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.Users().Read(rs.Primary.ID) if err != nil { return err diff --git a/databricks/resource_databricks_secret.go b/databricks/resource_databricks_secret.go index 08e089b84..ac6eb3243 100644 --- a/databricks/resource_databricks_secret.go +++ b/databricks/resource_databricks_secret.go @@ -49,7 +49,7 @@ func getScopeAndKeyFromSecretID(secretIDString string) (string, string, error) { } func resourceSecretCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) scopeName := d.Get("scope").(string) key := d.Get("key").(string) secretValue := d.Get("string_value").(string) @@ -67,7 +67,7 @@ func resourceSecretCreate(d *schema.ResourceData, m interface{}) error { func resourceSecretRead(d *schema.ResourceData, m interface{}) error 
{ id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) scope, key, err := getScopeAndKeyFromSecretID(id) if err != nil { return err @@ -99,7 +99,7 @@ func resourceSecretRead(d *schema.ResourceData, m interface{}) error { } func resourceSecretDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() scope, key, err := getScopeAndKeyFromSecretID(id) if err != nil { diff --git a/databricks/resource_databricks_secret_acl.go b/databricks/resource_databricks_secret_acl.go index d95c7b7c1..47758497c 100644 --- a/databricks/resource_databricks_secret_acl.go +++ b/databricks/resource_databricks_secret_acl.go @@ -44,7 +44,7 @@ func getScopeAndKeyFromSecretACLID(SecretACLIDString string) (string, string, er } func resourceSecretACLCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) scopeName := d.Get("scope").(string) principal := d.Get("principal").(string) permission := model.ACLPermission(d.Get("permission").(string)) @@ -66,7 +66,7 @@ func resourceSecretACLRead(d *schema.ResourceData, m interface{}) error { if err != nil { return err } - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) secretACL, err := client.SecretAcls().Read(scope, principal) if err != nil { if isSecretACLMissing(err.Error(), scope, principal) { @@ -89,7 +89,7 @@ func resourceSecretACLRead(d *schema.ResourceData, m interface{}) error { } func resourceSecretACLDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() scope, key, err := getScopeAndKeyFromSecretACLID(id) if err != nil { diff --git a/databricks/resource_databricks_secret_acl_test.go b/databricks/resource_databricks_secret_acl_test.go index 5f6794fdd..9e78a70c2 100644 --- a/databricks/resource_databricks_secret_acl_test.go +++ 
b/databricks/resource_databricks_secret_acl_test.go @@ -45,7 +45,7 @@ func TestAccSecretAclResource(t *testing.T) { }, { PreConfig: func() { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) err := client.SecretAcls().Delete(scope, principal) assert.NoError(t, err, err) }, @@ -68,7 +68,7 @@ func TestAccSecretAclResource(t *testing.T) { } func testSecretACLResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_secret" && rs.Type != "databricks_secret_scope" { continue @@ -103,7 +103,7 @@ func testSecretACLResourceExists(n string, aclItem *model.ACLItem, t *testing.T) } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.SecretAcls().Read(rs.Primary.Attributes["scope"], rs.Primary.Attributes["principal"]) //t.Log(resp) if err != nil { diff --git a/databricks/resource_databricks_secret_scope.go b/databricks/resource_databricks_secret_scope.go index 0abc9de10..a262edc67 100644 --- a/databricks/resource_databricks_secret_scope.go +++ b/databricks/resource_databricks_secret_scope.go @@ -35,7 +35,7 @@ func resourceSecretScope() *schema.Resource { } func resourceSecretScopeCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) scopeName := d.Get("name").(string) initialManagePrincipal := d.Get("initial_manage_principal").(string) err := client.SecretScopes().Create(scopeName, initialManagePrincipal) @@ -47,7 +47,7 @@ func resourceSecretScopeCreate(d *schema.ResourceData, m interface{}) error { } func resourceSecretScopeRead(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := 
m.(*service.DBApiClient) id := d.Id() scope, err := client.SecretScopes().Read(id) if err != nil { @@ -68,7 +68,7 @@ func resourceSecretScopeRead(d *schema.ResourceData, m interface{}) error { } func resourceSecretScopeDelete(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) id := d.Id() err := client.SecretScopes().Delete(id) return err diff --git a/databricks/resource_databricks_secret_scope_test.go b/databricks/resource_databricks_secret_scope_test.go index 26f690754..2dad8d270 100644 --- a/databricks/resource_databricks_secret_scope_test.go +++ b/databricks/resource_databricks_secret_scope_test.go @@ -42,7 +42,7 @@ func TestAccSecretScopeResource(t *testing.T) { }, { PreConfig: func() { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) err := client.SecretScopes().Delete(scope) assert.NoError(t, err, err) }, @@ -64,7 +64,7 @@ func TestAccSecretScopeResource(t *testing.T) { } func testSecretScopeResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_secret_scope" { continue @@ -96,7 +96,7 @@ func testSecretScopeResourceExists(n string, secretScope *model.SecretScope, t * } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.SecretScopes().Read(rs.Primary.ID) //t.Log(resp) if err != nil { diff --git a/databricks/resource_databricks_secret_test.go b/databricks/resource_databricks_secret_test.go index 7358aa3aa..e94a69981 100644 --- a/databricks/resource_databricks_secret_test.go +++ b/databricks/resource_databricks_secret_test.go @@ -47,7 +47,7 @@ func TestAccSecretResource(t *testing.T) { { //Deleting and recreating the secret 
PreConfig: func() { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) err := client.Secrets().Delete(scope, secret.Key) assert.NoError(t, err, err) }, @@ -68,7 +68,7 @@ func TestAccSecretResource(t *testing.T) { { //Deleting the scope should recreate the secret PreConfig: func() { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) err := client.SecretScopes().Delete(scope) assert.NoError(t, err, err) }, @@ -91,7 +91,7 @@ func TestAccSecretResource(t *testing.T) { } func testSecretResourceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_secret" && rs.Type != "databricks_secret_scope" { continue @@ -127,7 +127,7 @@ func testSecretResourceExists(n string, secret *model.SecretMetadata, t *testing } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.Secrets().Read(rs.Primary.Attributes["scope"], rs.Primary.Attributes["key"]) //t.Log(resp) if err != nil { diff --git a/databricks/resource_databricks_token.go b/databricks/resource_databricks_token.go index fc3094176..27ddd26e5 100644 --- a/databricks/resource_databricks_token.go +++ b/databricks/resource_databricks_token.go @@ -47,7 +47,7 @@ func resourceToken() *schema.Resource { } func resourceTokenCreate(d *schema.ResourceData, m interface{}) error { - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) lifeTimeSeconds := d.Get("lifetime_seconds").(int) comment := d.Get("comment").(string) tokenResp, err := client.Tokens().Create(int32(lifeTimeSeconds), comment) @@ -64,7 +64,7 @@ func resourceTokenCreate(d *schema.ResourceData, m interface{}) error { func resourceTokenRead(d 
*schema.ResourceData, m interface{}) error { id := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) token, err := client.Tokens().Read(id) if err != nil { if isTokenMissing(err.Error(), id) { @@ -84,7 +84,7 @@ func resourceTokenRead(d *schema.ResourceData, m interface{}) error { func resourceTokenDelete(d *schema.ResourceData, m interface{}) error { tokenID := d.Id() - client := m.(service.DBApiClient) + client := m.(*service.DBApiClient) err := client.Tokens().Delete(tokenID) return err } diff --git a/databricks/resource_databricks_token_test.go b/databricks/resource_databricks_token_test.go index 2691093ca..37bb69d83 100644 --- a/databricks/resource_databricks_token_test.go +++ b/databricks/resource_databricks_token_test.go @@ -43,7 +43,7 @@ func TestAccTokenResource(t *testing.T) { { //Deleting and recreating the token PreConfig: func() { - client := testAccProvider.Meta().(service.DBApiClient) + client := testAccProvider.Meta().(*service.DBApiClient) err := client.Tokens().Delete(tokenInfo.TokenID) assert.NoError(t, err, err) }, @@ -65,7 +65,7 @@ func TestAccTokenResource(t *testing.T) { } func testAccCheckTokenResourceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_token" { continue @@ -101,7 +101,7 @@ func testAccCheckTokenResourceExists(n string, tokenInfo *model.TokenInfo, t *te } // retrieve the configured client from the test setup - conn := testAccProvider.Meta().(service.DBApiClient) + conn := testAccProvider.Meta().(*service.DBApiClient) resp, err := conn.Tokens().Read(rs.Primary.ID) if err != nil { return err diff --git a/databricks/utils.go b/databricks/utils.go index 0d7d75e94..64f223770 100644 --- a/databricks/utils.go +++ b/databricks/utils.go @@ -8,7 +8,7 @@ import ( "time" ) -func changeClusterIntoRunningState(clusterID string, client 
service.DBApiClient) error { +func changeClusterIntoRunningState(clusterID string, client *service.DBApiClient) error { //return nil clusterInfo, err := client.Clusters().Get(clusterID) if err != nil { From a6b509758d1b0ff59ca2f30c039f8c77ba632f3f Mon Sep 17 00:00:00 2001 From: Jordan Jennings Date: Wed, 3 Jun 2020 10:30:36 -0400 Subject: [PATCH 03/10] Fix Dockerfile compatibility with Alpine Linux In order to support Alpine Linux the CGO_ENABLED=0 flag is needed, which is set in .goreleaser.yml but was not set in the Dockerfile. Before this change the binary built via the Dockerfile was not compatible with hashicorp/terraform base image, which is based on Alpine Linux. --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d84aebe09..32b166508 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ WORKDIR /go/src/github.com/databrickslabs/databricks-terraform/ RUN curl -sSL "https://github.com/gotestyourself/gotestsum/releases/download/v0.4.2/gotestsum_0.4.2_linux_amd64.tar.gz" | tar -xz -C /usr/local/bin gotestsum RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.25.0 COPY . . 
-RUN make vendor build +RUN CGO_ENABLED=0 make vendor build FROM hashicorp/terraform:latest COPY --from=0 /go/src/github.com/databrickslabs/databricks-terraform/terraform-provider-databricks /root/.terraform.d/plugins/ From 89af527d0cb865a0d19591d3e7b3b6273185ae02 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 16:25:34 -0400 Subject: [PATCH 04/10] initial commit of the multiple workspaces api implementation --- .gitignore | 1 + Makefile | 26 +- client/model/mws.go | 110 ++++++ client/service/apis.go | 25 ++ client/service/main_test.go | 13 + client/service/mws_credentials.go | 77 +++++ .../mws_credentials_integration_test.go | 30 ++ client/service/mws_customer_managed_keys.go | 69 ++++ ..._customer_managed_keys_integration_test.go | 19 ++ client/service/mws_networks.go | 65 ++++ .../service/mws_networks_integration_test.go | 31 ++ client/service/mws_storage_configurations.go | 76 +++++ ...storage_configurations_integration_test.go | 30 ++ client/service/mws_workspaces.go | 136 ++++++++ .../mws_workspaces_integration_test.go | 19 ++ databricks/provider.go | 46 ++- databricks/provider_test.go | 22 ++ .../resource_databricks_mws_credentials.go | 119 +++++++ ...rce_databricks_mws_credentials_mws_test.go | 137 ++++++++ .../resource_databricks_mws_networks.go | 197 +++++++++++ ...source_databricks_mws_networks_mws_test.go | 153 +++++++++ ...e_databricks_mws_storage_configurations.go | 115 +++++++ ...cks_mws_storage_configurations_mws_test.go | 142 ++++++++ .../resource_databricks_mws_workspaces.go | 314 ++++++++++++++++++ ...urce_databricks_mws_workspaces_mws_test.go | 121 +++++++ ...resource_databricks_scim_group_aws_test.go | 49 ++- ...source_databricks_scim_group_azure_test.go | 49 ++- .../resource_databricks_scim_user_aws_test.go | 5 +- ...esource_databricks_scim_user_azure_test.go | 61 ++-- ...esource_databricks_secret_acl_aws_test.go} | 25 +- ...source_databricks_secret_acl_azure_test.go | 131 ++++++++ ...=> 
resource_databricks_secret_aws_test.go} | 31 +- .../resource_databricks_secret_azure_test.go | 155 +++++++++ ...ource_databricks_secret_scope_aws_test.go} | 25 +- ...urce_databricks_secret_scope_azure_test.go | 119 +++++++ ... => resource_databricks_token_aws_test.go} | 32 +- .../resource_databricks_token_azure_test.go | 121 +++++++ databricks/utils.go | 24 ++ integration-environment-mws/README.MD | 37 +++ integration-environment-mws/prereqs.tf | 171 ++++++++++ integration-environment-mws/run.sh | 53 +++ .../cross_account_role_assume_policy.tpl | 17 + .../templates/cross_account_role_policy.tpl | 81 +++++ .../templates/storage_bucket_policy.tpl | 24 ++ 44 files changed, 3132 insertions(+), 171 deletions(-) create mode 100644 client/model/mws.go create mode 100644 client/service/mws_credentials.go create mode 100644 client/service/mws_credentials_integration_test.go create mode 100644 client/service/mws_customer_managed_keys.go create mode 100644 client/service/mws_customer_managed_keys_integration_test.go create mode 100644 client/service/mws_networks.go create mode 100644 client/service/mws_networks_integration_test.go create mode 100644 client/service/mws_storage_configurations.go create mode 100644 client/service/mws_storage_configurations_integration_test.go create mode 100644 client/service/mws_workspaces.go create mode 100644 client/service/mws_workspaces_integration_test.go create mode 100644 databricks/resource_databricks_mws_credentials.go create mode 100644 databricks/resource_databricks_mws_credentials_mws_test.go create mode 100644 databricks/resource_databricks_mws_networks.go create mode 100644 databricks/resource_databricks_mws_networks_mws_test.go create mode 100644 databricks/resource_databricks_mws_storage_configurations.go create mode 100644 databricks/resource_databricks_mws_storage_configurations_mws_test.go create mode 100644 databricks/resource_databricks_mws_workspaces.go create mode 100644 
databricks/resource_databricks_mws_workspaces_mws_test.go rename databricks/{resource_databricks_secret_acl_test.go => resource_databricks_secret_acl_aws_test.go} (81%) create mode 100644 databricks/resource_databricks_secret_acl_azure_test.go rename databricks/{resource_databricks_secret_test.go => resource_databricks_secret_aws_test.go} (83%) create mode 100644 databricks/resource_databricks_secret_azure_test.go rename databricks/{resource_databricks_secret_scope_test.go => resource_databricks_secret_scope_aws_test.go} (80%) create mode 100644 databricks/resource_databricks_secret_scope_azure_test.go rename databricks/{resource_databricks_token_test.go => resource_databricks_token_aws_test.go} (75%) create mode 100644 databricks/resource_databricks_token_azure_test.go create mode 100644 integration-environment-mws/README.MD create mode 100644 integration-environment-mws/prereqs.tf create mode 100755 integration-environment-mws/run.sh create mode 100644 integration-environment-mws/templates/cross_account_role_assume_policy.tpl create mode 100644 integration-environment-mws/templates/cross_account_role_policy.tpl create mode 100644 integration-environment-mws/templates/storage_bucket_policy.tpl diff --git a/.gitignore b/.gitignore index 1e45218e7..cc5534499 100644 --- a/.gitignore +++ b/.gitignore @@ -329,3 +329,4 @@ website/public/** .vscode/private.env tf.log +*.env \ No newline at end of file diff --git a/Makefile b/Makefile index 4325a262f..9ae41145b 100644 --- a/Makefile +++ b/Makefile @@ -39,25 +39,6 @@ fmt: lint @echo "==> Formatting source code with gofmt..." @go fmt ./... - -python-setup: - @echo "==> Setting up virtual env and installing python libraries..." - @python -m pip install virtualenv - @cd docs && python -m virtualenv venv - @cd docs && source venv/bin/activate && python -m pip install -r requirements.txt - -docs: python-setup - @echo "==> Building Docs ..." 
- @cd docs && source venv/bin/activate && make clean && make html - -opendocs: python-setup docs - @echo "==> Opening Docs ..." - @cd docs && open build/html/index.html - -singlehtmldocs: python-setup - @echo "==> Building Docs ..." - @cd docs && source venv/bin/activate && make clean && make singlehtml - vendor: @echo "==> Filling vendor folder with library code..." @go mod vendor @@ -70,7 +51,12 @@ terraform-acc-azure: fmt # INTEGRATION TESTING WITH AWS terraform-acc-aws: fmt @echo "==> Running Terraform Acceptance Tests for AWS..." - @CLOUD_ENV="aws" TF_ACC=1 gotestsum --format short-verbose --raw-command go test -v -json -tags=aws -short -coverprofile=coverage.out ./... + @CLOUD_ENV="aws" TF_ACC=1 gotestsum --format short-verbose --raw-command go test -v -json -short -coverprofile=coverage.out -run 'TestAccAws' ./... + +# INTEGRATION TESTING WITH AWS +terraform-acc-mws: fmt + @echo "==> Running Terraform Acceptance Tests for Multiple Workspace APIs on AWS..." + @/bin/bash integration-environment-mws/run.sh terraform-setup: build @echo "==> Initializing Terraform..." 
diff --git a/client/model/mws.go b/client/model/mws.go new file mode 100644 index 000000000..146319987 --- /dev/null +++ b/client/model/mws.go @@ -0,0 +1,110 @@ +package model + +// StsRole is the object that contains cross account role arn and external app id +type StsRole struct { + RoleArn string `json:"role_arn,omitempty"` + ExternalID string `json:"external_id,omitempty"` +} + +// AwsCredentials is the object that points to the cross account role +type AwsCredentials struct { + StsRole *StsRole `json:"sts_role,omitempty"` +} + +// MWSCredentials is the object that contains all the information for the credentials to create a workspace +type MWSCredentials struct { + CredentialsID string `json:"credentials_id,omitempty"` + CredentialsName string `json:"credentials_name,omitempty"` + AwsCredentials *AwsCredentials `json:"aws_credentials,omitempty"` + AccountID string `json:"account_id,omitempty"` + CreationTime int64 `json:"creation_time,omitempty"` +} + +// RootBucketInfo points to a bucket name +type RootBucketInfo struct { + BucketName string `json:"bucket_name,omitempty"` +} + +// MWSStorageConfigurations is the object that contains all the information for the root storage bucket +type MWSStorageConfigurations struct { + StorageConfigurationID string `json:"storage_configuration_id,omitempty"` + StorageConfigurationName string `json:"storage_configuration_name,omitempty"` + RootBucketInfo *RootBucketInfo `json:"root_bucket_info,omitempty"` + AccountID string `json:"account_id,omitempty"` + CreationTime int64 `json:"creation_time,omitempty"` +} + +// NetworkHealth is the object that contains all the error message when attaching a network to workspace +type NetworkHealth struct { + ErrorType string `json:"error_type,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` +} + +// MWSNetwork is the object that contains all the information for BYOVPC +type MWSNetwork struct { + NetworkID string `json:"network_id,omitempty"` + NetworkName string 
`json:"network_name,omitempty"` + VPCID string `json:"vpc_id,omitempty"` + SubnetIds []string `json:"subnet_ids,omitempty"` + SecurityGroupIds []string `json:"security_group_ids,omitempty"` + VPCStatus string `json:"vpc_status,omitempty"` + ErrorMessages []NetworkHealth `json:"error_messages,omitempty"` + WorkspaceID int64 `json:"workspace_id,omitempty"` + AccountID string `json:"account_id,omitempty"` + CreationTime int64 `json:"creation_time,omitempty"` +} + +// AwsKeyInfo has information about the KMS key for BYOK +type AwsKeyInfo struct { + KeyArn string `json:"key_arn,omitempty"` + KeyAlias string `json:"key_alias,omitempty"` + KeyRegion string `json:"key_region,omitempty"` +} + +// MWSCustomerManagedKey contains key information and metadata for BYOK for E2 +type MWSCustomerManagedKey struct { + CustomerManagedKeyID string `json:"customer_managed_key_id,omitempty"` + AwsKeyInfo *AwsKeyInfo `json:"aws_key_info,omitempty"` + AccountID string `json:"account_id,omitempty"` + CreationTime int64 `json:"creation_time,omitempty"` +} + +// List of workspace statuses for provisioning the workspace +const ( + WorkspaceStatusNotProvisioned = "NOT_PROVISIONED" + WorkspaceStatusProvisioning = "PROVISIONING" + WorkspaceStatusRunning = "RUNNING" + WorkspaceStatusFailed = "FAILED" + WorkspaceStatusCanceled = "CANCELLED" +) + +// WorkspaceStatusesNonRunnable is a list of statuses in which the workspace is not runnable +var WorkspaceStatusesNonRunnable = []string{WorkspaceStatusCanceled, WorkspaceStatusFailed} + +// ContainsWorkspaceState given a list of workspaceStates and the search state +// it will return true if it found the search state +func ContainsWorkspaceState(workspaceStates []string, searchState string) bool { + for _, state := range workspaceStates { + if state == searchState { + return true + } + } + return false +} + +// MWSWorkspace is the object that contains all the information for deploying a E2 workspace +type MWSWorkspace struct { + WorkspaceID int64 
`json:"workspace_id,omitempty"` + WorkspaceName string `json:"workspace_name,omitempty"` + DeploymentName string `json:"deployment_name,omitempty"` + AwsRegion string `json:"aws_region,omitempty"` + CredentialsID string `json:"credentials_id,omitempty"` + StorageConfigurationID string `json:"storage_configuration_id,omitempty"` + NetworkID string `json:"network_id,omitempty"` + CustomerManagedKeyID string `json:"customer_managed_key_id,omitempty"` + IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` + AccountID string `json:"account_id,omitempty"` + WorkspaceStatus string `json:"workspace_status,omitempty"` + WorkspaceStatusMessage string `json:"workspace_status_message,omitempty"` + CreationTime int64 `json:"creation_time,omitempty"` +} diff --git a/client/service/apis.go b/client/service/apis.go index ba408dc4c..f17b6ab03 100644 --- a/client/service/apis.go +++ b/client/service/apis.go @@ -86,6 +86,31 @@ func (c *DBApiClient) Commands() CommandsAPI { return CommandsAPI{Client: c} } +// MWSCredentials returns an instance of MWSCredentialsAPI +func (c *DBApiClient) MWSCredentials() MWSCredentialsAPI { + return MWSCredentialsAPI{Client: c} +} + +// MWSStorageConfigurations returns an instance of MWSStorageConfigurationsAPI +func (c *DBApiClient) MWSStorageConfigurations() MWSStorageConfigurationsAPI { + return MWSStorageConfigurationsAPI{Client: c} +} + +// MWSWorkspaces returns an instance of MWSWorkspacesAPI +func (c *DBApiClient) MWSWorkspaces() MWSWorkspacesAPI { + return MWSWorkspacesAPI{Client: c} +} + +// MWSNetworks returns an instance of MWSNetworksAPI +func (c *DBApiClient) MWSNetworks() MWSNetworksAPI { + return MWSNetworksAPI{Client: c} +} + +// MWSCustomerManagedKeys returns an instance of MWSCustomerManagedKeysAPI +func (c *DBApiClient) MWSCustomerManagedKeys() MWSCustomerManagedKeysAPI { + return MWSCustomerManagedKeysAPI{Client: c} +} + func (c *DBApiClient) performQuery(method, path string, apiVersion string, headers 
map[string]string, data interface{}, secretsMask *SecretsMask) ([]byte, error) { err := c.Config.getOrCreateToken() if err != nil { diff --git a/client/service/main_test.go b/client/service/main_test.go index 7d0af2d44..0a895009c 100644 --- a/client/service/main_test.go +++ b/client/service/main_test.go @@ -1,6 +1,7 @@ package service import ( + "encoding/base64" "encoding/json" "fmt" "github.com/joho/godotenv" @@ -51,6 +52,18 @@ func GetIntegrationDBAPIClient() *DBApiClient { return &c } +func GetIntegrationMWSAPIClient() *DBApiClient { + var config DBApiClientConfig + tokenUnB64 := fmt.Sprintf("%s:%s", os.Getenv("DATABRICKS_USERNAME"), os.Getenv("DATABRICKS_PASSWORD")) + config.AuthType = BasicAuth + config.Token = base64.StdEncoding.EncodeToString([]byte(tokenUnB64)) + config.Host = os.Getenv("DATABRICKS_MWS_HOST") + + var c DBApiClient + c.SetConfig(&config) + return &c +} + func GetCloudInstanceType(c *DBApiClient) string { if strings.Contains(c.Config.Host, "azure") { return "Standard_DS3_v2" diff --git a/client/service/mws_credentials.go b/client/service/mws_credentials.go new file mode 100644 index 000000000..3b77cf18e --- /dev/null +++ b/client/service/mws_credentials.go @@ -0,0 +1,77 @@ +package service + +import ( + "encoding/json" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "net/http" +) + +// MWSCredentialsAPI exposes the mws credentials API +type MWSCredentialsAPI struct { + Client *DBApiClient +} + +// Create creates a set of E2 Credentials for the cross account role +func (a MWSCredentialsAPI) Create(mwsAcctId, credentialsName string, roleArn string) (model.MWSCredentials, error) { + var mwsCreds model.MWSCredentials + + credentialsAPIPath := fmt.Sprintf("/accounts/%s/credentials", mwsAcctId) + + mwsCredentialsRequest := model.MWSCredentials{ + CredentialsName: credentialsName, + AwsCredentials: &model.AwsCredentials{ + StsRole: &model.StsRole{ + RoleArn: roleArn, + }, + }, + } + + resp, err := 
a.Client.performQuery(http.MethodPost, credentialsAPIPath, "2.0", nil, mwsCredentialsRequest, nil) + if err != nil { + return mwsCreds, err + } + + err = json.Unmarshal(resp, &mwsCreds) + return mwsCreds, err +} + +// Read returns the credentials object along with metadata +func (a MWSCredentialsAPI) Read(mwsAcctId, credentialsID string) (model.MWSCredentials, error) { + var mwsCreds model.MWSCredentials + + credentialsAPIPath := fmt.Sprintf("/accounts/%s/credentials/%s", mwsAcctId, credentialsID) + + resp, err := a.Client.performQuery(http.MethodGet, credentialsAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsCreds, err + } + + err = json.Unmarshal(resp, &mwsCreds) + return mwsCreds, err +} + +// Delete deletes the credentials object given a credentials id +func (a MWSCredentialsAPI) Delete(mwsAcctId, credentialsID string) error { + + credentialsAPIPath := fmt.Sprintf("/accounts/%s/credentials/%s", mwsAcctId, credentialsID) + + _, err := a.Client.performQuery(http.MethodDelete, credentialsAPIPath, "2.0", nil, nil, nil) + + return err +} + +// List lists all the available credentials object in the mws account +func (a MWSCredentialsAPI) List(mwsAcctId string) ([]model.MWSCredentials, error) { + var mwsCredsList []model.MWSCredentials + + credentialsAPIPath := fmt.Sprintf("/accounts/%s/credentials", mwsAcctId) + + resp, err := a.Client.performQuery(http.MethodGet, credentialsAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsCredsList, err + } + + err = json.Unmarshal(resp, &mwsCredsList) + return mwsCredsList, err +} diff --git a/client/service/mws_credentials_integration_test.go b/client/service/mws_credentials_integration_test.go new file mode 100644 index 000000000..ab804521b --- /dev/null +++ b/client/service/mws_credentials_integration_test.go @@ -0,0 +1,30 @@ +package service + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestE2Creds(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration 
test in short mode.") + } + acctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + client := GetIntegrationMWSAPIClient() + credsList, err := client.MWSCredentials().List(acctId) + assert.NoError(t, err, err) + t.Log(credsList) + + myCreds, err := client.MWSCredentials().Create(acctId, "sri-mws-terraform-automation-role", "arn:aws:iam::997819999999:role/sri-e2-terraform-automation-role") + assert.NoError(t, err, err) + + myCredsFull, err := client.MWSCredentials().Read(acctId, myCreds.CredentialsID) + assert.NoError(t, err, err) + t.Log(myCredsFull.AwsCredentials.StsRole.ExternalID) + + defer func() { + err = client.MWSCredentials().Delete(acctId, myCreds.CredentialsID) + assert.NoError(t, err, err) + }() +} diff --git a/client/service/mws_customer_managed_keys.go b/client/service/mws_customer_managed_keys.go new file mode 100644 index 000000000..86642bf19 --- /dev/null +++ b/client/service/mws_customer_managed_keys.go @@ -0,0 +1,69 @@ +package service + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "net/http" +) + +// MWSCustomerManagedKeysAPI exposes the mws customerManagedKeys API +type MWSCustomerManagedKeysAPI struct { + Client *DBApiClient +} + +// Create creates a set of E2 CustomerManagedKeys for the BYOVPC +func (a MWSCustomerManagedKeysAPI) Create(mwsAcctId, keyArn, keyAlias, keyRegion string) (model.MWSCustomerManagedKey, error) { + var mwsCustomerManagedKey model.MWSCustomerManagedKey + + customerManagedKeysAPIPath := fmt.Sprintf("/accounts/%s/customer-managed-keys", mwsAcctId) + mwsCustomerManagedKeysRequest := model.MWSCustomerManagedKey{ + AwsKeyInfo: &model.AwsKeyInfo{ + KeyArn: keyArn, + KeyAlias: keyAlias, + KeyRegion: keyRegion, + }, + } + resp, err := a.Client.performQuery(http.MethodPost, customerManagedKeysAPIPath, "2.0", nil, mwsCustomerManagedKeysRequest, nil) + if err != nil { + return mwsCustomerManagedKey, err + } + err = json.Unmarshal(resp, &mwsCustomerManagedKey) + return 
mwsCustomerManagedKey, err +} + +// Read returns the customer managed key object along with metadata +func (a MWSCustomerManagedKeysAPI) Read(mwsAcctId, customerManagedKeysID string) (model.MWSCustomerManagedKey, error) { + var mwsCustomerManagedKey model.MWSCustomerManagedKey + customerManagedKeysAPIPath := fmt.Sprintf("/accounts/%s/customer-managed-keys/%s", mwsAcctId, customerManagedKeysID) + resp, err := a.Client.performQuery(http.MethodGet, customerManagedKeysAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsCustomerManagedKey, err + } + err = json.Unmarshal(resp, &mwsCustomerManagedKey) + return mwsCustomerManagedKey, err +} + +// Delete deletes the customer managed key object given a network id +func (a MWSCustomerManagedKeysAPI) Delete(customerManagedKeysID string) error { + //customerManagedKeysAPIPath := fmt.Sprintf("/accounts/%s/customer-managed-keys/%s", a.Client.Config.E2AcctID, customerManagedKeysID) + //_, err := a.Client.performQuery(http.MethodDelete, customerManagedKeysAPIPath, "2.0", nil, nil, nil) + //return err + return errors.New("delete is not yet supported") +} + +// List lists all the available customer managed key objects in the mws account +func (a MWSCustomerManagedKeysAPI) List(mwsAcctId string) ([]model.MWSCustomerManagedKey, error) { + var mwsCustomerManagedKeyList []model.MWSCustomerManagedKey + + customerManagedKeysAPIPath := fmt.Sprintf("/accounts/%s/customer-managed-keys", mwsAcctId) + + resp, err := a.Client.performQuery(http.MethodGet, customerManagedKeysAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsCustomerManagedKeyList, err + } + + err = json.Unmarshal(resp, &mwsCustomerManagedKeyList) + return mwsCustomerManagedKeyList, err +} diff --git a/client/service/mws_customer_managed_keys_integration_test.go b/client/service/mws_customer_managed_keys_integration_test.go new file mode 100644 index 000000000..d5e13b6c4 --- /dev/null +++ b/client/service/mws_customer_managed_keys_integration_test.go @@ -0,0 
+1,19 @@ +package service + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestE2CustomerManagedKeys(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + acctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + client := GetIntegrationMWSAPIClient() + networksList, err := client.MWSCustomerManagedKeys().List(acctId) + assert.NoError(t, err, err) + t.Log(networksList) + +} diff --git a/client/service/mws_networks.go b/client/service/mws_networks.go new file mode 100644 index 000000000..cebbba3f2 --- /dev/null +++ b/client/service/mws_networks.go @@ -0,0 +1,65 @@ +package service + +import ( + "encoding/json" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "net/http" +) + +// MWSNetworksAPI exposes the mws networks API +type MWSNetworksAPI struct { + Client *DBApiClient +} + +// Create creates a set of E2 Networks for the BYOVPC +func (a MWSNetworksAPI) Create(mwsAcctId, networkName string, VPCID string, subnetIds []string, securityGroupIds []string) (model.MWSNetwork, error) { + var mwsNetwork model.MWSNetwork + networksAPIPath := fmt.Sprintf("/accounts/%s/networks", mwsAcctId) + mwsNetworksRequest := model.MWSNetwork{ + NetworkName: networkName, + VPCID: VPCID, + SubnetIds: subnetIds, + SecurityGroupIds: securityGroupIds, + } + resp, err := a.Client.performQuery(http.MethodPost, networksAPIPath, "2.0", nil, mwsNetworksRequest, nil) + if err != nil { + return mwsNetwork, err + } + err = json.Unmarshal(resp, &mwsNetwork) + return mwsNetwork, err +} + +// Read returns the network object along with metadata and any additional errors when attaching to workspace +func (a MWSNetworksAPI) Read(mwsAcctId, networksID string) (model.MWSNetwork, error) { + var mwsNetwork model.MWSNetwork + networksAPIPath := fmt.Sprintf("/accounts/%s/networks/%s", mwsAcctId, networksID) + resp, err := a.Client.performQuery(http.MethodGet, networksAPIPath, "2.0", nil, nil, nil) + if err != nil { + 
return mwsNetwork, err + } + err = json.Unmarshal(resp, &mwsNetwork) + return mwsNetwork, err +} + +// Delete deletes the network object given a network id +func (a MWSNetworksAPI) Delete(mwsAcctId, networksID string) error { + networksAPIPath := fmt.Sprintf("/accounts/%s/networks/%s", mwsAcctId, networksID) + _, err := a.Client.performQuery(http.MethodDelete, networksAPIPath, "2.0", nil, nil, nil) + return err +} + +// List lists all the available network objects in the mws account +func (a MWSNetworksAPI) List(mwsAcctId string) ([]model.MWSNetwork, error) { + var mwsNetworkList []model.MWSNetwork + + networksAPIPath := fmt.Sprintf("/accounts/%s/networks", mwsAcctId) + + resp, err := a.Client.performQuery(http.MethodGet, networksAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsNetworkList, err + } + + err = json.Unmarshal(resp, &mwsNetworkList) + return mwsNetworkList, err +} diff --git a/client/service/mws_networks_integration_test.go b/client/service/mws_networks_integration_test.go new file mode 100644 index 000000000..7f474e39d --- /dev/null +++ b/client/service/mws_networks_integration_test.go @@ -0,0 +1,31 @@ +package service + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestE2Networks(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + acctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + client := GetIntegrationMWSAPIClient() + networksList, err := client.MWSNetworks().List(acctId) + assert.NoError(t, err, err) + t.Log(networksList) + + myNetwork, err := client.MWSNetworks().Create(acctId, "sri-mws-terraform-automation-network", + "vpc-0abcdef1234567890", []string{"subnet-0123456789abcdef0", "subnet-0fedcba9876543210"}, []string{"sg-0a1b2c3d4e5f6a7b8"}) + assert.NoError(t, err, err) + defer func() { + err = client.MWSNetworks().Delete(acctId, myNetwork.NetworkID) + assert.NoError(t, err, err) + }() + + myNetworkFull, err := client.MWSNetworks().Read(acctId, 
myNetwork.NetworkID) + assert.NoError(t, err, err) + t.Log(myNetworkFull) + +} diff --git a/client/service/mws_storage_configurations.go b/client/service/mws_storage_configurations.go new file mode 100644 index 000000000..06b7d4b8b --- /dev/null +++ b/client/service/mws_storage_configurations.go @@ -0,0 +1,76 @@ +package service + +import ( + "encoding/json" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + + "net/http" +) + +// MWSStorageConfigurationsAPI exposes the mws storageConfiguration API +type MWSStorageConfigurationsAPI struct { + Client *DBApiClient +} + +// Create creates a configuration for the root s3 bucket +func (a MWSStorageConfigurationsAPI) Create(mwsAcctId, storageConfigurationName string, bucketName string) (model.MWSStorageConfigurations, error) { + var mwsStorageConfigurations model.MWSStorageConfigurations + + storageConfigurationAPIPath := fmt.Sprintf("/accounts/%s/storage-configurations", mwsAcctId) + + mwsStorageConfigurationsRequest := model.MWSStorageConfigurations{ + StorageConfigurationName: storageConfigurationName, + RootBucketInfo: &model.RootBucketInfo{ + BucketName: bucketName, + }, + } + + resp, err := a.Client.performQuery(http.MethodPost, storageConfigurationAPIPath, "2.0", nil, mwsStorageConfigurationsRequest, nil) + if err != nil { + return mwsStorageConfigurations, err + } + + err = json.Unmarshal(resp, &mwsStorageConfigurations) + return mwsStorageConfigurations, err +} + +// Read returns the configuration for the root s3 bucket and metadata for the storage configuration +func (a MWSStorageConfigurationsAPI) Read(mwsAcctId, storageConfigurationID string) (model.MWSStorageConfigurations, error) { + var mwsStorageConfigurations model.MWSStorageConfigurations + + storageConfigurationAPIPath := fmt.Sprintf("/accounts/%s/storage-configurations/%s", mwsAcctId, storageConfigurationID) + + resp, err := a.Client.performQuery(http.MethodGet, storageConfigurationAPIPath, "2.0", nil, nil, nil) + if err != nil { 
+ return mwsStorageConfigurations, err + } + + err = json.Unmarshal(resp, &mwsStorageConfigurations) + return mwsStorageConfigurations, err +} + +// Delete deletes the configuration for the root s3 bucket +func (a MWSStorageConfigurationsAPI) Delete(mwsAcctId, storageConfigurationID string) error { + + storageConfigurationAPIPath := fmt.Sprintf("/accounts/%s/storage-configurations/%s", mwsAcctId, storageConfigurationID) + + _, err := a.Client.performQuery(http.MethodDelete, storageConfigurationAPIPath, "2.0", nil, nil, nil) + + return err +} + +// List lists all the storage configurations for the root s3 buckets in the E2 account ID provided to the client config +func (a MWSStorageConfigurationsAPI) List(mwsAcctId string) ([]model.MWSStorageConfigurations, error) { + var mwsStorageConfigurationsList []model.MWSStorageConfigurations + + storageConfigurationAPIPath := fmt.Sprintf("/accounts/%s/storage-configurations", mwsAcctId) + + resp, err := a.Client.performQuery(http.MethodGet, storageConfigurationAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsStorageConfigurationsList, err + } + + err = json.Unmarshal(resp, &mwsStorageConfigurationsList) + return mwsStorageConfigurationsList, err +} diff --git a/client/service/mws_storage_configurations_integration_test.go b/client/service/mws_storage_configurations_integration_test.go new file mode 100644 index 000000000..7e98ea826 --- /dev/null +++ b/client/service/mws_storage_configurations_integration_test.go @@ -0,0 +1,30 @@ +package service + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestE2StorageConfigurations(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + acctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + client := GetIntegrationMWSAPIClient() + storageConfigsList, err := client.MWSStorageConfigurations().List(acctId) + assert.NoError(t, err, err) + t.Log(storageConfigsList) + + storageConfig, err := 
client.MWSStorageConfigurations().Create(acctId, "sri-mws-terraform-storage-root-bucket", "sri-root-s3-bucket") + assert.NoError(t, err, err) + + myStorageConfig, err := client.MWSStorageConfigurations().Read(acctId, storageConfig.StorageConfigurationID) + assert.NoError(t, err, err) + t.Log(myStorageConfig.RootBucketInfo.BucketName) + + defer func() { + err = client.MWSStorageConfigurations().Delete(acctId, storageConfig.StorageConfigurationID) + assert.NoError(t, err, err) + }() +} diff --git a/client/service/mws_workspaces.go b/client/service/mws_workspaces.go new file mode 100644 index 000000000..c0ac29824 --- /dev/null +++ b/client/service/mws_workspaces.go @@ -0,0 +1,136 @@ +package service + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "log" + "net/http" + "reflect" + "time" +) + +// MWSWorkspacesAPI exposes the mws workspaces API +type MWSWorkspacesAPI struct { + Client *DBApiClient +} + +// Create creates the workspace creation process +func (a MWSWorkspacesAPI) Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyID string, isNoPublicIpEnabled bool) (model.MWSWorkspace, error) { + var mwsWorkspace model.MWSWorkspace + + workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces", mwsAcctId) + + mwsWorkspacesRequest := model.MWSWorkspace{ + WorkspaceName: workspaceName, + DeploymentName: deploymentName, + AwsRegion: awsRegion, + CredentialsID: credentialsID, + StorageConfigurationID: storageConfigurationID, + IsNoPublicIpEnabled: isNoPublicIpEnabled, + } + + if !reflect.ValueOf(networkID).IsZero() { + mwsWorkspacesRequest.NetworkID = networkID + } + + if !reflect.ValueOf(customerManagedKeyID).IsZero() { + mwsWorkspacesRequest.CustomerManagedKeyID = customerManagedKeyID + } + + resp, err := a.Client.performQuery(http.MethodPost, workspacesAPIPath, "2.0", nil, mwsWorkspacesRequest, nil) + if err != nil { + return 
mwsWorkspace, err + } + + err = json.Unmarshal(resp, &mwsWorkspace) + return mwsWorkspace, err +} + +// WaitForWorkspaceRunning will hold the main thread till the workspace is in a running state +func (a MWSWorkspacesAPI) WaitForWorkspaceRunning(mwsAcctId string, workspaceID int64, sleepDurationSeconds time.Duration, timeoutDurationMinutes time.Duration) error { + errChan := make(chan error, 1) + go func() { + for { + workspace, err := a.Read(mwsAcctId, workspaceID) + if err != nil { + errChan <- err + } + if workspace.WorkspaceStatus == model.WorkspaceStatusRunning { + errChan <- nil + } else if model.ContainsWorkspaceState(model.WorkspaceStatusesNonRunnable, workspace.WorkspaceStatus) { + errChan <- errors.New("Workspace is in a non runnable state will not be able to transition to running, needs " + + "to be created again. Current state: " + string(workspace.WorkspaceStatus)) + } + log.Println("Waiting for workspace to go to running, current state is: " + workspace.WorkspaceStatus) + time.Sleep(sleepDurationSeconds * time.Second) + } + }() + select { + case err := <-errChan: + return err + case <-time.After(timeoutDurationMinutes * time.Minute): + return errors.New("Timed out workspace has not reached running state") + } +} + +// Patch will relaunch the mws workspace deployment TODO: may need to include customer managed key +func (a MWSWorkspacesAPI) Patch(mwsAcctId string, workspaceID int64, awsRegion, credentialsID, storageConfigurationID, networkID string, isNoPublicIpEnabled bool) error { + workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces/%d", mwsAcctId, workspaceID) + + mwsWorkspacesRequest := model.MWSWorkspace{ + AwsRegion: awsRegion, + CredentialsID: credentialsID, + StorageConfigurationID: storageConfigurationID, + IsNoPublicIpEnabled: isNoPublicIpEnabled, + } + + if !reflect.ValueOf(networkID).IsZero() { + mwsWorkspacesRequest.NetworkID = networkID + } + + _, err := a.Client.performQuery(http.MethodPatch, workspacesAPIPath, "2.0", nil, 
mwsWorkspacesRequest, nil) + return err +} + +// Read will return the mws workspace metadata and status of the workspace deployment +func (a MWSWorkspacesAPI) Read(mwsAcctId string, workspaceID int64) (model.MWSWorkspace, error) { + var mwsWorkspace model.MWSWorkspace + + workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces/%d", mwsAcctId, workspaceID) + + resp, err := a.Client.performQuery(http.MethodGet, workspacesAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsWorkspace, err + } + + err = json.Unmarshal(resp, &mwsWorkspace) + return mwsWorkspace, err +} + +// Delete will delete the configuration for the workspace given a workspace id and will not block. A follow up email +// will be sent when the workspace is fully deleted. +func (a MWSWorkspacesAPI) Delete(mwsAcctId string, workspaceID int64) error { + + workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces/%d", mwsAcctId, workspaceID) + + _, err := a.Client.performQuery(http.MethodDelete, workspacesAPIPath, "2.0", nil, nil, nil) + + return err +} + +// List will list all workspaces in a given mws account +func (a MWSWorkspacesAPI) List(mwsAcctId string) ([]model.MWSWorkspace, error) { + var mwsWorkspacesList []model.MWSWorkspace + + workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces", mwsAcctId) + + resp, err := a.Client.performQuery(http.MethodGet, workspacesAPIPath, "2.0", nil, nil, nil) + if err != nil { + return mwsWorkspacesList, err + } + + err = json.Unmarshal(resp, &mwsWorkspacesList) + return mwsWorkspacesList, err +} diff --git a/client/service/mws_workspaces_integration_test.go b/client/service/mws_workspaces_integration_test.go new file mode 100644 index 000000000..68d643092 --- /dev/null +++ b/client/service/mws_workspaces_integration_test.go @@ -0,0 +1,19 @@ +package service + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestE2Workspace(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + 
acctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + client := GetIntegrationMWSAPIClient() + workspaceList, err := client.MWSWorkspaces().List(acctId) + assert.NoError(t, err, err) + t.Log(workspaceList) + +} diff --git a/databricks/provider.go b/databricks/provider.go index 5829f8f22..ee882327d 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -1,6 +1,7 @@ package databricks import ( + "encoding/base64" "fmt" "log" "os" @@ -39,6 +40,11 @@ func Provider(version string) terraform.ResourceProvider { "databricks_azure_blob_mount": resourceAzureBlobMount(), "databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(), "databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(), + // MWS (multiple workspaces) resources are only limited to AWS as azure already has a built in concept of MWS + "databricks_mws_credentials": resourceMWSCredentials(), + "databricks_mws_storage_configurations": resourceMWSStorageConfigurations(), + "databricks_mws_networks": resourceMWSNetworks(), + "databricks_mws_workspaces": resourceMWSWorkspaces(), }, Schema: map[string]*schema.Schema{ "host": &schema.Schema{ @@ -47,9 +53,32 @@ func Provider(version string) terraform.ResourceProvider { DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_HOST", nil), }, "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_TOKEN", nil), + Type: schema.TypeString, + Optional: true, + Sensitive: true, + DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_TOKEN", nil), + ConflictsWith: []string{"basic_auth"}, + }, + "basic_auth": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_USERNAME", nil), + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Sensitive: true, + Required: true, + DefaultFunc: 
schema.EnvDefaultFunc("DATABRICKS_PASSWORD", nil), + }, + }, + }, + ConflictsWith: []string{"token"}, }, "azure_auth": &schema.Schema{ Type: schema.TypeMap, @@ -209,6 +238,17 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac if token, ok := d.GetOk("token"); ok { config.Token = token.(string) } + + // Basic authentication setup via username and password + if _, ok := d.GetOk("basic_auth"); ok { + username, userOk := d.GetOk("basic_auth.0.username") + password, passOk := d.GetOk("basic_auth.0.password") + if userOk && passOk { + tokenUnB64 := fmt.Sprintf("%s:%s", username.(string), password.(string)) + config.Token = base64.StdEncoding.EncodeToString([]byte(tokenUnB64)) + config.AuthType = service.BasicAuth + } + } } else { // Abstracted logic to another function that returns a interface{}, error to inject directly // for the providers during cloud integration testing diff --git a/databricks/provider_test.go b/databricks/provider_test.go index 011d6ebc9..d89525b1b 100644 --- a/databricks/provider_test.go +++ b/databricks/provider_test.go @@ -1,6 +1,7 @@ package databricks import ( + "encoding/base64" "fmt" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "log" @@ -15,9 +16,11 @@ import ( var testAccProviders map[string]terraform.ResourceProvider var testAccProvider *schema.Provider +var testMWSProvider *schema.Provider func init() { testAccProvider = Provider("").(*schema.Provider) + testMWSProvider = Provider("").(*schema.Provider) cloudEnv := os.Getenv("CLOUD_ENV") // If Azure inject sp based auth, this should probably have a different environment variable @@ -32,6 +35,25 @@ func init() { testAccProviders = map[string]terraform.ResourceProvider{ "databricks": testAccProvider, } + +} + +func getMWSClient() *service.DBApiClient { + // Configure MWS Provider + mwsHost := os.Getenv("DATABRICKS_MWS_HOST") + mwsUser := os.Getenv("DATABRICKS_USERNAME") + mwsPass := os.Getenv("DATABRICKS_PASSWORD") + + tokenUnB64 := 
fmt.Sprintf("%s:%s", mwsUser, mwsPass) + token := base64.StdEncoding.EncodeToString([]byte(tokenUnB64)) + config := service.DBApiClientConfig{ + Host: mwsHost, + Token: token, + AuthType: service.BasicAuth, + } + return &service.DBApiClient{ + Config: &config, + } } func TestMain(m *testing.M) { diff --git a/databricks/resource_databricks_mws_credentials.go b/databricks/resource_databricks_mws_credentials.go new file mode 100644 index 000000000..12656c5d9 --- /dev/null +++ b/databricks/resource_databricks_mws_credentials.go @@ -0,0 +1,119 @@ +package databricks + +import ( + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "log" + "strings" +) + +func resourceMWSCredentials() *schema.Resource { + return &schema.Resource{ + Create: resourceMWSCredentialsCreate, + Read: resourceMWSCredentialsRead, + Delete: resourceMWSCredentialsDelete, + + Schema: map[string]*schema.Schema{ + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Sensitive: true, + ForceNew: true, + }, + "credentials_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + "external_id": { + Type: schema.TypeString, + Computed: true, + }, + "credentials_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceMWSCredentialsCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + credentialsName := d.Get("credentials_name").(string) + roleArn := d.Get("role_arn").(string) + mwsAcctId := d.Get("account_id").(string) + credentials, err := client.MWSCredentials().Create(mwsAcctId, credentialsName, roleArn) + if err != nil { + return err + } + credentialsResourceId := PackagedMWSIds{ + MwsAcctId: mwsAcctId, + ResourceId: 
credentials.CredentialsID, + } + d.SetId(packMWSAccountId(credentialsResourceId)) + return resourceMWSCredentialsRead(d, m) +} + +func resourceMWSCredentialsRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + credentials, err := client.MWSCredentials().Read(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + if err != nil { + if isMWSCredentialsMissing(err.Error()) { + log.Printf("Missing e2 credentials with id: %s.", packagedMwsId.ResourceId) + d.SetId("") + return nil + } + return err + } + err = d.Set("credentials_name", credentials.CredentialsName) + if err != nil { + return err + } + err = d.Set("role_arn", credentials.AwsCredentials.StsRole.RoleArn) + if err != nil { + return err + } + err = d.Set("creation_time", credentials.CreationTime) + if err != nil { + return err + } + err = d.Set("external_id", credentials.AwsCredentials.StsRole.ExternalID) + if err != nil { + return err + } + err = d.Set("credentials_id", credentials.CredentialsID) + if err != nil { + return err + } + return nil +} + +func resourceMWSCredentialsDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + err = client.MWSCredentials().Delete(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + return err +} + +func isMWSCredentialsMissing(errorMsg string) bool { + return strings.Contains(errorMsg, "RESOURCE_DOES_NOT_EXIST") +} diff --git a/databricks/resource_databricks_mws_credentials_mws_test.go b/databricks/resource_databricks_mws_credentials_mws_test.go new file mode 100644 index 000000000..3ba79f62d --- /dev/null +++ b/databricks/resource_databricks_mws_credentials_mws_test.go @@ -0,0 +1,137 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "os" + "testing" +) + +func TestAccMWSCredentials(t *testing.T) { + var MWSCredentials model.MWSCredentials + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. + // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsHost := os.Getenv("DATABRICKS_MWS_HOST") + awsAcctId := "999999999999" + credentialsName := "test-mws-credentials-tf" + roleName := "terraform-creds-role" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testMWSCredentialsResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSCredentialsResourceExists("databricks_mws_credentials.my_e2_credentials", &MWSCredentials, t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "credentials_name", credentialsName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + 
testMWSCredentialsResourceExists("databricks_mws_credentials.my_e2_credentials", &MWSCredentials, t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "credentials_name", credentialsName), + ), + ExpectNonEmptyPlan: false, + Destroy: false, + }, + { + PreConfig: func() { + conn := getMWSClient() + err := conn.MWSCredentials().Delete(MWSCredentials.AccountID, MWSCredentials.CredentialsID) + if err != nil { + panic(err) + } + }, + // use a dynamic configuration with the random name from above + Config: testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // verify local values + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "credentials_name", credentialsName), + ), + Destroy: false, + }, + }, + }) +} + +func testMWSCredentialsResourceDestroy(s *terraform.State) error { + client := getMWSClient() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_mws_credentials" { + continue + } + packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if err != nil { + return err + } + _, err = client.MWSCredentials().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + if err != nil { + return nil + } + return errors.New("resource Scim Group is not cleaned up") + } + return nil +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
+func testMWSCredentialsResourceExists(n string, mwsCreds *model.MWSCredentials, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := getMWSClient() + packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if err != nil { + return err + } + resp, err := conn.MWSCredentials().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *mwsCreds = resp + return nil + } +} + +func testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName string) string { + return fmt.Sprintf(` + provider "databricks" { + host = "%s" + basic_auth {} + } + resource "databricks_mws_credentials" "my_e2_credentials" { + account_id = "%s" + credentials_name = "%s" + role_arn = "arn:aws:iam::%s:role/%s" + } + `, mwsHost, mwsAcctId, credentialsName, awsAcctId, roleName) +} diff --git a/databricks/resource_databricks_mws_networks.go b/databricks/resource_databricks_mws_networks.go new file mode 100644 index 000000000..c1fa514c6 --- /dev/null +++ b/databricks/resource_databricks_mws_networks.go @@ -0,0 +1,197 @@ +package databricks + +import ( + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "log" + "reflect" + "strings" +) + +func resourceMWSNetworks() *schema.Resource { + return &schema.Resource{ + Create: resourceMWSNetworkCreate, + Read: resourceMWSNetworkRead, + Delete: resourceMWSNetworkDelete, + + Schema: map[string]*schema.Schema{ + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + 
Sensitive: true, + ForceNew: true, + }, + "network_name": { + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(4, 256), + Required: true, + ForceNew: true, + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 2, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "security_group_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 5, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "vpc_status": { + Type: schema.TypeString, + Computed: true, + }, + "error_messages": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_type": { + Type: schema.TypeString, + Computed: true, + }, + "error_message": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Optional: true, + Computed: true, + }, + "workspace_id": { + Type: schema.TypeInt, + Computed: true, + }, + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + "network_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceMWSNetworkCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + networkName := d.Get("network_name").(string) + mwsAcctId := d.Get("account_id").(string) + VPCID := d.Get("vpc_id").(string) + subnetIds := convertListInterfaceToString(d.Get("subnet_ids").(*schema.Set).List()) + securityGroupIds := convertListInterfaceToString(d.Get("security_group_ids").(*schema.Set).List()) + + network, err := client.MWSNetworks().Create(mwsAcctId, networkName, VPCID, subnetIds, securityGroupIds) + if err != nil { + return err + } + networksResourceId := PackagedMWSIds{ + MwsAcctId: mwsAcctId, + ResourceId: network.NetworkID, + } + d.SetId(packMWSAccountId(networksResourceId)) + return resourceMWSNetworkRead(d, m) +} + +func resourceMWSNetworkRead(d *schema.ResourceData, m interface{}) 
error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + network, err := client.MWSNetworks().Read(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + if err != nil { + if isMWSNetworkMissing(err.Error()) { + log.Printf("Missing e2 network with id: %s.", id) + d.SetId("") + return nil + } + return err + } + err = d.Set("network_name", network.NetworkName) + if err != nil { + return err + } + err = d.Set("vpc_id", network.VPCID) + if err != nil { + return err + } + err = d.Set("subnet_ids", network.SubnetIds) + if err != nil { + return err + } + err = d.Set("security_group_ids", network.SecurityGroupIds) + if err != nil { + return err + } + err = d.Set("vpc_status", network.VPCStatus) + if err != nil { + return err + } + + if !reflect.ValueOf(network.ErrorMessages).IsZero() { + err = d.Set("error_messages", convertErrorMessagesToListOfMaps(network.ErrorMessages)) + if err != nil { + return err + } + } + + err = d.Set("workspace_id", network.WorkspaceID) + if err != nil { + return err + } + err = d.Set("account_id", network.AccountID) + if err != nil { + return err + } + err = d.Set("creation_time", network.CreationTime) + if err != nil { + return err + } + err = d.Set("network_id", network.NetworkID) + if err != nil { + return err + } + + return nil +} + +func resourceMWSNetworkDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + err = client.MWSNetworks().Delete(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + return err +} + +func convertErrorMessagesToListOfMaps(errorMsgs []model.NetworkHealth) []map[string]string { + var resp []map[string]string + for _, errorMsg := range errorMsgs { + errorMap := map[string]string{} + errorMap["error_type"] = errorMsg.ErrorType + errorMap["error_message"] = errorMsg.ErrorMessage + resp = append(resp, 
errorMap) + } + return resp +} + +func isMWSNetworkMissing(errorMsg string) bool { + return strings.Contains(errorMsg, "RESOURCE_DOES_NOT_EXIST") +} diff --git a/databricks/resource_databricks_mws_networks_mws_test.go b/databricks/resource_databricks_mws_networks_mws_test.go new file mode 100644 index 000000000..66ca8ffaa --- /dev/null +++ b/databricks/resource_databricks_mws_networks_mws_test.go @@ -0,0 +1,153 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "os" + "testing" +) + +func TestAccMWSNetworks(t *testing.T) { + var MWSNetwork model.MWSNetwork + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. + // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsHost := os.Getenv("DATABRICKS_MWS_HOST") + networkName := "test-mws-network-tf" + + vpc := "vpc-11111111" + subnet1 := "subnet-11111111" + subnet2 := "subnet-99999999" + sg1 := "sg-11111111" + sg2 := "sg-99999999" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testMWSNetworkResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSNetworkResourceExists("databricks_mws_networks.my_network", &MWSNetwork, t), + // verify local values + 
resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "network_name", networkName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSNetworkResourceExists("databricks_mws_networks.my_network", &MWSNetwork, t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "network_name", networkName), + ), + ExpectNonEmptyPlan: false, + Destroy: false, + }, + { + PreConfig: func() { + conn := getMWSClient() + err := conn.MWSNetworks().Delete(MWSNetwork.AccountID, MWSNetwork.NetworkID) + if err != nil { + panic(err) + } + }, + // use a dynamic configuration with the random name from above + Config: testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSNetworkResourceExists("databricks_mws_networks.my_network", &MWSNetwork, t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "network_name", networkName), + ), + ExpectNonEmptyPlan: false, + Destroy: false, + }, + }, + }) +} + +func testMWSNetworkResourceDestroy(s *terraform.State) error { + client := getMWSClient() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_mws_storage_configurations" { + continue + } 
+ packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if err != nil { + return err + } + _, err = client.MWSNetworks().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + if err != nil { + return nil + } + return errors.New("resource Scim Group is not cleaned up") + } + return nil +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testMWSNetworkResourceExists(n string, mwsCreds *model.MWSNetwork, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := getMWSClient() + packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if err != nil { + return err + } + resp, err := conn.MWSNetworks().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *mwsCreds = resp + return nil + } +} + +func testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpcId, subnetId1, subnetId2, sgId1, sgId2 string) string { + return fmt.Sprintf(` + provider "databricks" { + host = "%s" + basic_auth {} + } + resource "databricks_mws_networks" "my_network" { + account_id = "%s" + network_name = "%s" + vpc_id = "%s" + subnet_ids = [ + "%s", + "%s", + ] + security_group_ids = [ + "%s", + "%s", + ] + } + + `, mwsHost, mwsAcctId, networkName, vpcId, subnetId1, subnetId2, sgId1, sgId2) +} diff --git a/databricks/resource_databricks_mws_storage_configurations.go b/databricks/resource_databricks_mws_storage_configurations.go new file mode 100644 index 000000000..9df0394c6 --- /dev/null +++ b/databricks/resource_databricks_mws_storage_configurations.go @@ -0,0 +1,115 @@ +package databricks + +import ( + "github.com/databrickslabs/databricks-terraform/client/service" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "strings" +) + +func resourceMWSStorageConfigurations() *schema.Resource { + return &schema.Resource{ + Create: resourceMWSStorageConfigurationsCreate, + Read: resourceMWSStorageConfigurationsRead, + Delete: resourceMWSStorageConfigurationsDelete, + + Schema: map[string]*schema.Schema{ + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Sensitive: true, + ForceNew: true, + }, + "storage_configuration_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + "storage_configuration_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceMWSStorageConfigurationsCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + storageConfigurationName := d.Get("storage_configuration_name").(string) + bucketName := d.Get("bucket_name").(string) + mwsAcctId := d.Get("account_id").(string) + storageConfiguration, err := client.MWSStorageConfigurations().Create(mwsAcctId, storageConfigurationName, bucketName) + if err != nil { + return err + } + storageConfigurationResourceId := PackagedMWSIds{ + MwsAcctId: mwsAcctId, + ResourceId: storageConfiguration.StorageConfigurationID, + } + d.SetId(packMWSAccountId(storageConfigurationResourceId)) + return resourceMWSStorageConfigurationsRead(d, m) +} + +func resourceMWSStorageConfigurationsRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + storageConifiguration, err := client.MWSStorageConfigurations().Read(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + if err != nil { + if isE2StorageConfigurationsMissing(err.Error()) { + log.Printf("Missing mws storage 
configurations with id: %s.", id) + d.SetId("") + return nil + } + return err + } + err = d.Set("storage_configuration_name", storageConifiguration.StorageConfigurationName) + if err != nil { + return err + } + err = d.Set("bucket_name", storageConifiguration.RootBucketInfo.BucketName) + if err != nil { + return err + } + err = d.Set("account_id", storageConifiguration.AccountID) + if err != nil { + return err + } + err = d.Set("creation_time", storageConifiguration.CreationTime) + if err != nil { + return err + } + err = d.Set("storage_configuration_id", storageConifiguration.StorageConfigurationID) + if err != nil { + return err + } + + return nil +} + +func resourceMWSStorageConfigurationsDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + err = client.MWSStorageConfigurations().Delete(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + return err +} + +func isE2StorageConfigurationsMissing(errorMsg string) bool { + return strings.Contains(errorMsg, "RESOURCE_DOES_NOT_EXIST") +} diff --git a/databricks/resource_databricks_mws_storage_configurations_mws_test.go b/databricks/resource_databricks_mws_storage_configurations_mws_test.go new file mode 100644 index 000000000..35950da6d --- /dev/null +++ b/databricks/resource_databricks_mws_storage_configurations_mws_test.go @@ -0,0 +1,142 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "os" + "testing" +) + +func TestAccMWSStorageConfigurations(t *testing.T) { + var MWSStorageConfigurations model.MWSStorageConfigurations + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsHost := os.Getenv("DATABRICKS_MWS_HOST") + storageConfigName := "test-mws-storage-configurations-tf" + bucketName := "terraform-test-bucket" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testMWSStorageConfigurationsResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSStorageConfigurationsResourceExists("databricks_mws_storage_configurations.my_mws_storage_configurations", &MWSStorageConfigurations, t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "storage_configuration_name", storageConfigName), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "bucket_name", bucketName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSStorageConfigurationsResourceExists("databricks_mws_storage_configurations.my_mws_storage_configurations", &MWSStorageConfigurations, 
t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "storage_configuration_name", storageConfigName), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "bucket_name", bucketName), + ), + ExpectNonEmptyPlan: false, + Destroy: false, + }, + { + PreConfig: func() { + conn := getMWSClient() + err := conn.MWSStorageConfigurations().Delete(MWSStorageConfigurations.AccountID, MWSStorageConfigurations.StorageConfigurationID) + if err != nil { + panic(err) + } + }, + // use a dynamic configuration with the random name from above + Config: testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testMWSStorageConfigurationsResourceExists("databricks_mws_storage_configurations.my_mws_storage_configurations", &MWSStorageConfigurations, t), + // verify local values + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "storage_configuration_name", storageConfigName), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "bucket_name", bucketName), + ), + ExpectNonEmptyPlan: false, + Destroy: false, + }, + }, + }) +} + +func testMWSStorageConfigurationsResourceDestroy(s *terraform.State) error { + client := getMWSClient() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_mws_storage_configurations" { + continue + } + packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if 
err != nil { + return err + } + _, err = client.MWSStorageConfigurations().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + if err != nil { + return nil + } + return errors.New("resource Scim Group is not cleaned up") + } + return nil +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testMWSStorageConfigurationsResourceExists(n string, mwsCreds *model.MWSStorageConfigurations, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := getMWSClient() + packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if err != nil { + return err + } + resp, err := conn.MWSStorageConfigurations().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *mwsCreds = resp + return nil + } +} + +func testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName string) string { + return fmt.Sprintf(` + provider "databricks" { + host = "%s" + basic_auth {} + } + resource "databricks_mws_storage_configurations" "my_mws_storage_configurations" { + account_id = "%s" + storage_configuration_name = "%s" + bucket_name = "%s" + } + `, mwsHost, mwsAcctId, storageConfigName, bucketName) +} diff --git a/databricks/resource_databricks_mws_workspaces.go b/databricks/resource_databricks_mws_workspaces.go new file mode 100644 index 000000000..bd51a9f24 --- /dev/null +++ b/databricks/resource_databricks_mws_workspaces.go @@ -0,0 +1,314 @@ +package databricks + +import ( + "bytes" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "reflect" + + "log" + "strconv" + "strings" + "time" +) + +func resourceMWSWorkspaces() *schema.Resource { + return &schema.Resource{ + Create: resourceMWSWorkspacesCreate, + Read: resourceMWSWorkspacesRead, + Update: resourceMWSWorkspacePatch, + Delete: resourceMWSWorkspacesDelete, + + Schema: map[string]*schema.Schema{ + "account_id": &schema.Schema{ + Type: schema.TypeString, + Sensitive: true, + Required: true, + ForceNew: true, + }, + "workspace_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "deployment_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "aws_region": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "us-west-1", + "us-west-2", + "us-east-1", + "sa-east-1", + "eu-west-1", + "eu-central-1", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "ca-central-1", + }, false), + }, + "credentials_id": { + Type: schema.TypeString, + Required: true, + }, + "storage_configuration_id": { + Type: schema.TypeString, + Required: true, + }, + "verify_workspace_runnning": { + Type: schema.TypeBool, + Required: true, + }, + "network_id": { + Type: schema.TypeString, + Optional: true, + }, + "is_no_public_ip_enabled": { + Type: schema.TypeBool, + Default: false, + Optional: true, + }, + "workspace_status": { + Type: schema.TypeString, + Computed: true, + }, + "workspace_status_message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "creation_time": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "workspace_id": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "workspace_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "network_error_messages": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_type": { + 
Type: schema.TypeString, + Computed: true, + }, + "error_message": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Computed: true, + }, + }, + } +} + +func resourceMWSWorkspacesCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + mwsAcctId := d.Get("account_id").(string) + workspaceName := d.Get("workspace_name").(string) + deploymentName := d.Get("deployment_name").(string) + awsRegion := d.Get("aws_region").(string) + credentialsID := d.Get("credentials_id").(string) + storageConfigurationID := d.Get("storage_configuration_id").(string) + networkID := d.Get("network_id").(string) + isNoPublicIpEnabled := d.Get("is_no_public_ip_enabled").(bool) + var workspace model.MWSWorkspace + var err error + workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, "", isNoPublicIpEnabled) + // Sometimes workspaces api is buggy + if err != nil { + time.Sleep(15 * time.Second) + workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, "", isNoPublicIpEnabled) + if err != nil { + return err + } + } + workspaceResourceId := PackagedMWSIds{ + MwsAcctId: mwsAcctId, + ResourceId: strconv.Itoa(int(workspace.WorkspaceID)), + } + d.SetId(packMWSAccountId(workspaceResourceId)) + err = client.MWSWorkspaces().WaitForWorkspaceRunning(mwsAcctId, workspace.WorkspaceID, 10, 180) + if err != nil { + if !reflect.ValueOf(networkID).IsZero() { + network, networkReadErr := client.MWSNetworks().Read(mwsAcctId, networkID) + if networkReadErr != nil { + return fmt.Errorf("Workspace failed to create: %v, network read failure error: %v", err, networkReadErr) + } + return fmt.Errorf("Workspace failed to create: %v, network error message: %v", err, getNetworkErrors(network.ErrorMessages)) + } + return err + } + return resourceMWSWorkspacesRead(d, m) +} + +func 
resourceMWSWorkspacesRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + idInt64, err := strconv.ParseInt(packagedMwsId.ResourceId, 10, 64) + if err != nil { + return err + } + + workspace, err := client.MWSWorkspaces().Read(packagedMwsId.MwsAcctId, idInt64) + if err != nil { + if isMWSWorkspaceMissing(err.Error(), id) { + log.Printf("Missing e2 workspace with id: %s.", id) + d.SetId("") + return nil + } + return err + } + + err = client.MWSWorkspaces().WaitForWorkspaceRunning(packagedMwsId.MwsAcctId, idInt64, 10, 180) + if err != nil { + log.Println("WORKSPACE IS NOT RUNNING") + err2 := d.Set("verify_workspace_runnning", false) + if err2 != nil { + return err2 + } + } else { + err2 := d.Set("verify_workspace_runnning", true) + if err2 != nil { + return err2 + } + } + + err = d.Set("deployment_name", workspace.DeploymentName) + if err != nil { + return err + } + err = d.Set("aws_region", workspace.AwsRegion) + if err != nil { + return err + } + err = d.Set("credentials_id", workspace.CredentialsID) + if err != nil { + return err + } + err = d.Set("storage_configuration_id", workspace.StorageConfigurationID) + if err != nil { + return err + } + err = d.Set("network_id", workspace.NetworkID) + if err != nil { + return err + } + err = d.Set("account_id", workspace.AccountID) + if err != nil { + return err + } + err = d.Set("workspace_status", workspace.WorkspaceStatus) + if err != nil { + return err + } + + if workspace.WorkspaceStatus != model.WorkspaceStatusRunning { + network, err := client.MWSNetworks().Read(workspace.AccountID, workspace.NetworkID) + if err == nil && !reflect.ValueOf(network.ErrorMessages).IsZero() { + err = d.Set("network_error_messages", convertErrorMessagesToListOfMaps(network.ErrorMessages)) + if err != nil { + return err + } + } + } else { + err = d.Set("network_error_messages", 
[]map[string]string{{"error_type": "", "error_message": ""}}) + if err != nil { + return err + } + } + + err = d.Set("workspace_status_message", workspace.WorkspaceStatusMessage) + if err != nil { + return err + } + err = d.Set("creation_time", workspace.CreationTime) + if err != nil { + return err + } + err = d.Set("workspace_id", workspace.WorkspaceID) + if err != nil { + return err + } + err = d.Set("workspace_url", fmt.Sprintf("https://%s.cloud.databricks.com", workspace.DeploymentName)) + if err != nil { + return err + } + return nil +} + +func resourceMWSWorkspacePatch(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + idInt64, err := strconv.ParseInt(packagedMwsId.ResourceId, 10, 64) + if err != nil { + return err + } + awsRegion := d.Get("aws_region").(string) + credentialsID := d.Get("credentials_id").(string) + storageConfigurationID := d.Get("storage_configuration_id").(string) + networkID := d.Get("network_id").(string) + isNoPublicIpEnabled := d.Get("is_no_public_ip_enabled").(bool) + + err = client.MWSWorkspaces().Patch(packagedMwsId.MwsAcctId, idInt64, awsRegion, credentialsID, storageConfigurationID, networkID, isNoPublicIpEnabled) + if err != nil { + return err + } + err = client.MWSWorkspaces().WaitForWorkspaceRunning(packagedMwsId.MwsAcctId, idInt64, 10, 180) + if err != nil { + return err + } + return resourceMWSWorkspacesRead(d, m) +} + +func resourceMWSWorkspacesDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + packagedMwsId, err := unpackMWSAccountId(id) + if err != nil { + return err + } + idInt64, err := strconv.ParseInt(packagedMwsId.ResourceId, 10, 64) + if err != nil { + return err + } + err = client.MWSWorkspaces().Delete(packagedMwsId.MwsAcctId, idInt64) + return err +} + +func getNetworkErrors(networkRespList []model.NetworkHealth) string { + var 
strBuffer bytes.Buffer + for _, networkHealth := range networkRespList { + strBuffer.WriteString(fmt.Sprintf("error: %s;error_msg: %s;", networkHealth.ErrorType, networkHealth.ErrorMessage)) + } + return strBuffer.String() +} + +func isMWSWorkspaceMissing(errorMsg, resourceID string) bool { + return strings.Contains(errorMsg, "RESOURCE_DOES_NOT_EXIST") && + strings.Contains(errorMsg, fmt.Sprintf("workspace %s does not exist", resourceID)) +} diff --git a/databricks/resource_databricks_mws_workspaces_mws_test.go b/databricks/resource_databricks_mws_workspaces_mws_test.go new file mode 100644 index 000000000..f5150fedf --- /dev/null +++ b/databricks/resource_databricks_mws_workspaces_mws_test.go @@ -0,0 +1,121 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "os" + "strconv" + "testing" +) + +func TestAccMWSWorkspaces(t *testing.T) { + //var MWSWorkspaces model.MWSWorkspace + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsHost := os.Getenv("DATABRICKS_MWS_HOST") + credentialsName := "tf-workspace-test-creds" + roleArn := os.Getenv("TEST_MWS_CROSS_ACCT_ROLE") + storageConfigName := "tf-workspace-storage-config" + bucketName := os.Getenv("TEST_MWS_ROOT_BUCKET") + workspaceName := fmt.Sprintf("tf-test-workspace-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) + deploymentName := fmt.Sprintf("%s-dep", workspaceName) + awsRegion := os.Getenv("DATABRICKS_MWS_AWS_REGION") + networkName := "tf-workspace-test-network" + vpcId := os.Getenv("TEST_MWS_VPC_ID") + subnet1 := os.Getenv("TEST_MWS_SUBNET_1") + subnet2 := os.Getenv("TEST_MWS_SUBNET_2") + sg := os.Getenv("TEST_MWS_SG") + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testMWSWorkspacesResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testMWSWorkspacesCreate(mwsAcctId, mwsHost, credentialsName, roleArn, storageConfigName, bucketName, + workspaceName, deploymentName, awsRegion, networkName, vpcId, subnet1, subnet2, sg), + //// compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // verify that after creation the workspace_status is in a running state. 
+ resource.TestCheckResourceAttr("databricks_mws_workspaces.my_mws_workspace", "workspace_status", "RUNNING"), + ), + Destroy: false, + }, + }, + }) +} + +func testMWSWorkspacesResourceDestroy(s *terraform.State) error { + client := getMWSClient() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_mws_workspaces" { + continue + } + packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + if err != nil { + return err + } + idInt64, err := strconv.ParseInt(packagedMWSIds.ResourceId, 10, 64) + if err != nil { + return err + } + _, err = client.MWSWorkspaces().Read(packagedMWSIds.MwsAcctId, idInt64) + if err != nil { + return nil + } + return errors.New("resource Scim Group is not cleaned up") + } + return nil +} + +func testMWSWorkspacesCreate(mwsAcctId, mwsHost, credentialsName, roleArn, storageConfigName, bucketName, + workspaceName, deploymentName, awsRegion, networkName, vpcId, subnet1, subnet2, sg string) string { + return fmt.Sprintf(` + provider "databricks" { + host = "%[1]s" + basic_auth {} + } + resource "databricks_mws_credentials" "my_mws_credentials" { + account_id = "%[2]s" + credentials_name = "%[3]s" + role_arn = "%[4]s" + } + + resource "databricks_mws_storage_configurations" "my_mws_storage_configurations" { + account_id = "%[2]s" + storage_configuration_name = "%[5]s" + bucket_name = "%[6]s" + } + resource "databricks_mws_networks" "my_network" { + account_id = "%[2]s" + network_name = "%[10]s" + vpc_id = "%[11]s" + subnet_ids = [ + "%[12]s", + "%[13]s", + ] + security_group_ids = [ + "%[14]s", + ] + } + resource "databricks_mws_workspaces" "my_mws_workspace" { + account_id = "%[2]s" + workspace_name = "%[7]s" + deployment_name = "%[8]s" + aws_region = "%[9]s" + credentials_id = databricks_mws_credentials.my_mws_credentials.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.my_mws_storage_configurations.storage_configuration_id + network_id = databricks_mws_networks.my_network.network_id + 
verify_workspace_runnning = true + } + `, mwsHost, mwsAcctId, credentialsName, roleArn, storageConfigName, bucketName, + workspaceName, deploymentName, awsRegion, networkName, vpcId, subnet1, subnet2, sg) +} diff --git a/databricks/resource_databricks_scim_group_aws_test.go b/databricks/resource_databricks_scim_group_aws_test.go index 10e46ca63..bd7f342d4 100644 --- a/databricks/resource_databricks_scim_group_aws_test.go +++ b/databricks/resource_databricks_scim_group_aws_test.go @@ -1,5 +1,3 @@ -// +build aws - package databricks import ( @@ -13,7 +11,7 @@ import ( "testing" ) -func TestAccScimGroupResource(t *testing.T) { +func TestAccAwsScimGroupResource(t *testing.T) { //var secretScope model.Secre var ScimGroup model.Group // generate a random name for each tokenInfo test run, to avoid @@ -29,19 +27,18 @@ func TestAccScimGroupResource(t *testing.T) { expectEntitlements := []model.EntitlementsListItem{{Value: model.AllowClusterCreateEntitlement}} resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testScimGroupResourceDestroy, + CheckDestroy: testAwsScimGroupResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testScimGroupResourceCreate(userName, displayName, groupName, role, entitlement), + Config: testAwsScimGroupResourceCreate(userName, displayName, groupName, role, entitlement), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), + testAwsScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, + testAwsScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, []model.RoleListItem{{Value: role}}, true), // verify local 
values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "display_name", displayName), @@ -53,14 +50,14 @@ func TestAccScimGroupResource(t *testing.T) { }, { // use a dynamic configuration with the random name from above - Config: testScimGroupResourceUpdate(groupName), + Config: testAwsScimGroupResourceUpdate(groupName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), + testAwsScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, nil, nil, false), + testAwsScimGroupValues(t, &ScimGroup, displayName, nil, nil, false), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "display_name", displayName), resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "entitlements.#", "0"), @@ -71,13 +68,13 @@ func TestAccScimGroupResource(t *testing.T) { }, { // Recreate the group with roles and entitlements again to see if the group gets updated - Config: testScimGroupResourceCreate(userName, displayName, groupName, role, entitlement), + Config: testAwsScimGroupResourceCreate(userName, displayName, groupName, role, entitlement), Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), + testAwsScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, + testAwsScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, []model.RoleListItem{{Value: role}}, true), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "display_name", displayName), @@ -93,13 
+90,13 @@ func TestAccScimGroupResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testScimGroupResourceUpdate(displayName), + Config: testAwsScimGroupResourceUpdate(displayName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), + testAwsScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, nil, nil, false), + testAwsScimGroupValues(t, &ScimGroup, displayName, nil, nil, false), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "display_name", displayName), resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "entitlements.#", "0"), @@ -110,13 +107,13 @@ func TestAccScimGroupResource(t *testing.T) { }, { // Recreate the group with roles and entitlements again to see if the group gets updated - Config: testScimGroupResourceInheritedRole(userName, displayName, groupName, role, entitlement), + Config: testAwsScimGroupResourceInheritedRole(userName, displayName, groupName, role, entitlement), Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), + testAwsScimGroupResourceExists("databricks_scim_group.my_scim_group", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, + testAwsScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, []model.RoleListItem{{Value: role}}, true), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group", "display_name", displayName), @@ -130,7 +127,7 @@ func TestAccScimGroupResource(t *testing.T) { }) } -func 
testScimGroupResourceDestroy(s *terraform.State) error { +func testAwsScimGroupResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_scim_group" { @@ -145,7 +142,7 @@ func testScimGroupResourceDestroy(s *terraform.State) error { return nil } -func testScimGroupValues(t *testing.T, group *model.Group, displayName string, expectEntitlements []model.EntitlementsListItem, expectRoles []model.RoleListItem, verifyMembers bool) resource.TestCheckFunc { +func testAwsScimGroupValues(t *testing.T, group *model.Group, displayName string, expectEntitlements []model.EntitlementsListItem, expectRoles []model.RoleListItem, verifyMembers bool) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, group.DisplayName == displayName) assert.EqualValues(t, group.Entitlements, expectEntitlements) @@ -156,7 +153,7 @@ func testScimGroupValues(t *testing.T, group *model.Group, displayName string, e } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
-func testScimGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { +func testAwsScimGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -177,7 +174,7 @@ func testScimGroupResourceExists(n string, group *model.Group, t *testing.T) res } } -func testScimGroupResourceCreate(username, displayName, groupName, role, entitlement string) string { +func testAwsScimGroupResourceCreate(username, displayName, groupName, role, entitlement string) string { return fmt.Sprintf(` resource "databricks_instance_profile" "instance_profile" { instance_profile_arn = "%s" @@ -210,7 +207,7 @@ func testScimGroupResourceCreate(username, displayName, groupName, role, entitle `, role, username, displayName, groupName, entitlement) } -func testScimGroupResourceUpdate(groupName string) string { +func testAwsScimGroupResourceUpdate(groupName string) string { return fmt.Sprintf(` resource "databricks_scim_group" "my_scim_group" { display_name = "%s" @@ -218,7 +215,7 @@ func testScimGroupResourceUpdate(groupName string) string { `, groupName) } -func testScimGroupResourceInheritedRole(username, displayName, groupName, role, entitlement string) string { +func testAwsScimGroupResourceInheritedRole(username, displayName, groupName, role, entitlement string) string { return fmt.Sprintf(` resource "databricks_instance_profile" "instance_profile" { instance_profile_arn = "%s" diff --git a/databricks/resource_databricks_scim_group_azure_test.go b/databricks/resource_databricks_scim_group_azure_test.go index 7603ad639..131fff685 100644 --- a/databricks/resource_databricks_scim_group_azure_test.go +++ b/databricks/resource_databricks_scim_group_azure_test.go @@ -1,5 +1,3 @@ -// +build azure - package databricks import ( @@ -13,7 +11,7 @@ import ( "testing" ) -func TestAccScimGroupResource(t *testing.T) { +func 
TestAccAzureScimGroupResource(t *testing.T) { //var secretScope model.Secre var ScimGroup model.Group // generate a random name for each tokenInfo test run, to avoid @@ -27,19 +25,18 @@ func TestAccScimGroupResource(t *testing.T) { expectEntitlements := []model.EntitlementsListItem{{Value: model.AllowClusterCreateEntitlement}} resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testScimGroupResourceDestroy, + CheckDestroy: testAzureScimGroupResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testScimGroupResourceCreate(userName, displayName, groupName, entitlement), + Config: testAzureScimGroupResourceCreate(userName, displayName, groupName, entitlement), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), + testAzureScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, true), + testAzureScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, true), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "display_name", displayName), resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "entitlements.#", "1"), @@ -49,14 +46,14 @@ func TestAccScimGroupResource(t *testing.T) { }, { // use a dynamic configuration with the random name from above - Config: testScimGroupResourceUpdate(groupName), + Config: testAzureScimGroupResourceUpdate(groupName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - 
testScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), + testAzureScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, nil, false), + testAzureScimGroupValues(t, &ScimGroup, displayName, nil, false), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "display_name", displayName), resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "entitlements.#", "0"), @@ -66,13 +63,13 @@ func TestAccScimGroupResource(t *testing.T) { }, { // Recreate the group with roles and entitlements again to see if the group gets updated - Config: testScimGroupResourceCreate(userName, displayName, groupName, entitlement), + Config: testAzureScimGroupResourceCreate(userName, displayName, groupName, entitlement), Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), + testAzureScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, true), + testAzureScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, true), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "display_name", displayName), @@ -87,13 +84,13 @@ func TestAccScimGroupResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testScimGroupResourceUpdate(displayName), + Config: testAzureScimGroupResourceUpdate(displayName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), + 
testAzureScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, nil, false), + testAzureScimGroupValues(t, &ScimGroup, displayName, nil, false), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "display_name", displayName), resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "entitlements.#", "0"), @@ -103,13 +100,13 @@ func TestAccScimGroupResource(t *testing.T) { }, { // Recreate the group with roles and entitlements again to see if the group gets updated - Config: testScimGroupResourceInheritedRole(userName, displayName, groupName, entitlement), + Config: testAzureScimGroupResourceInheritedRole(userName, displayName, groupName, entitlement), Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), + testAzureScimGroupResourceExists("databricks_scim_group.my_scim_group_test", &ScimGroup, t), // verify remote values - testScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, true), + testAzureScimGroupValues(t, &ScimGroup, displayName, expectEntitlements, true), // verify local values resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "display_name", displayName), resource.TestCheckResourceAttr("databricks_scim_group.my_scim_group_test", "entitlements.#", "1"), @@ -121,7 +118,7 @@ func TestAccScimGroupResource(t *testing.T) { }) } -func testScimGroupResourceDestroy(s *terraform.State) error { +func testAzureScimGroupResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_scim_group" { @@ -136,7 +133,7 @@ func testScimGroupResourceDestroy(s *terraform.State) error { return nil } -func testScimGroupValues(t *testing.T, group 
*model.Group, displayName string, expectEntitlements []model.EntitlementsListItem, verifyMembers bool) resource.TestCheckFunc { +func testAzureScimGroupValues(t *testing.T, group *model.Group, displayName string, expectEntitlements []model.EntitlementsListItem, verifyMembers bool) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, group.DisplayName == displayName) assert.EqualValues(t, group.Entitlements, expectEntitlements) @@ -146,7 +143,7 @@ func testScimGroupValues(t *testing.T, group *model.Group, displayName string, e } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. -func testScimGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { +func testAzureScimGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -167,7 +164,7 @@ func testScimGroupResourceExists(n string, group *model.Group, t *testing.T) res } } -func testScimGroupResourceCreate(username, displayName, groupName, entitlement string) string { +func testAzureScimGroupResourceCreate(username, displayName, groupName, entitlement string) string { return fmt.Sprintf(` resource "databricks_scim_user" "my_scim_group_test_user" { user_name = "%s" @@ -187,7 +184,7 @@ func testScimGroupResourceCreate(username, displayName, groupName, entitlement s `, username, displayName, groupName, entitlement) } -func testScimGroupResourceUpdate(groupName string) string { +func testAzureScimGroupResourceUpdate(groupName string) string { return fmt.Sprintf(` resource "databricks_scim_group" "my_scim_group_test" { @@ -196,7 +193,7 @@ func testScimGroupResourceUpdate(groupName string) string { `, groupName) } -func testScimGroupResourceInheritedRole(username, displayName, groupName, entitlement string) string { +func testAzureScimGroupResourceInheritedRole(username, 
displayName, groupName, entitlement string) string { return fmt.Sprintf(` resource "databricks_scim_user" "my_scim_group_test_user" { user_name = "%s" diff --git a/databricks/resource_databricks_scim_user_aws_test.go b/databricks/resource_databricks_scim_user_aws_test.go index 425ca6417..e2c9bb766 100644 --- a/databricks/resource_databricks_scim_user_aws_test.go +++ b/databricks/resource_databricks_scim_user_aws_test.go @@ -1,5 +1,3 @@ -// +build aws - package databricks import ( @@ -13,7 +11,7 @@ import ( "testing" ) -func TestAccScimUserResource(t *testing.T) { +func TestAccAwsScimUserResource(t *testing.T) { //var secretScope model.Secre var scimUser model.User // generate a random name for each tokenInfo test run, to avoid @@ -26,7 +24,6 @@ func TestAccScimUserResource(t *testing.T) { expectEntitlements := []model.EntitlementsListItem{{Value: model.AllowClusterCreateEntitlement}} resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testScimUserResourceDestroy, Steps: []resource.TestStep{ diff --git a/databricks/resource_databricks_scim_user_azure_test.go b/databricks/resource_databricks_scim_user_azure_test.go index 8dafea399..eae4c2d86 100644 --- a/databricks/resource_databricks_scim_user_azure_test.go +++ b/databricks/resource_databricks_scim_user_azure_test.go @@ -1,5 +1,3 @@ -// +build azure - package databricks import ( @@ -16,7 +14,7 @@ import ( "testing" ) -func TestAccScimUserResource(t *testing.T) { +func TestAccAzureScimUserResource(t *testing.T) { //var secretScope model.Secre var scimUser model.User // generate a random name for each tokenInfo test run, to avoid @@ -29,20 +27,19 @@ func TestAccScimUserResource(t *testing.T) { expectEntitlements := []model.EntitlementsListItem{{Value: model.AllowClusterCreateEntitlement}} resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testScimUserResourceDestroy, + 
CheckDestroy: testAzureScimUserResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testScimUserResourceCreate(userName, displayName), + Config: testAzureScimUserResourceCreate(userName, displayName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, expectEntitlements, "0"), + testAzureScimUserValues(t, &scimUser, userName, displayName, expectEntitlements, "0"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -52,14 +49,14 @@ func TestAccScimUserResource(t *testing.T) { }, { // use a dynamic configuration with the random name from above - Config: testScimUserResourceUpdate(userName, displayName), + Config: testAzureScimUserResourceUpdate(userName, displayName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, nil, "1"), + testAzureScimUserValues(t, &scimUser, userName, displayName, nil, "1"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -69,13 +66,13 @@ func 
TestAccScimUserResource(t *testing.T) { }, { // Recreate the user with roles and entitlements again to see if the user gets updated - Config: testScimUserResourceCreate(userName, displayName), + Config: testAzureScimUserResourceCreate(userName, displayName), Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, expectEntitlements, "2"), + testAzureScimUserValues(t, &scimUser, userName, displayName, expectEntitlements, "2"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -89,13 +86,13 @@ func TestAccScimUserResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testScimUserResourceUpdate(userName, displayName), + Config: testAzureScimUserResourceUpdate(userName, displayName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, nil, "3"), + testAzureScimUserValues(t, &scimUser, userName, displayName, nil, "3"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -110,13 +107,13 @@ func TestAccScimUserResource(t *testing.T) { assert.NoError(t, err, 
err) }, // Create new admin user - Config: testScimUserResourceSetAdmin(userName, displayName, true), + Config: testAzureScimUserResourceSetAdmin(userName, displayName, true), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, nil, "4"), + testAzureScimUserValues(t, &scimUser, userName, displayName, nil, "4"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -127,13 +124,13 @@ func TestAccScimUserResource(t *testing.T) { }, { // Update admin to false - Config: testScimUserResourceSetAdmin(userName, displayName, false), + Config: testAzureScimUserResourceSetAdmin(userName, displayName, false), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, nil, "5"), + testAzureScimUserValues(t, &scimUser, userName, displayName, nil, "5"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -144,13 +141,13 @@ func TestAccScimUserResource(t *testing.T) { }, { // Update admin back to true - Config: testScimUserResourceSetAdmin(userName, displayName, true), + Config: 
testAzureScimUserResourceSetAdmin(userName, displayName, true), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), + testAzureScimUserResourceExists("databricks_scim_user.my_scim_user", &scimUser, t), // verify remote values - testScimUserValues(t, &scimUser, userName, displayName, nil, "6"), + testAzureScimUserValues(t, &scimUser, userName, displayName, nil, "6"), // verify local values resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "user_name", userName), resource.TestCheckResourceAttr("databricks_scim_user.my_scim_user", "display_name", displayName), @@ -163,7 +160,7 @@ func TestAccScimUserResource(t *testing.T) { }) } -func testScimUserResourceDestroy(s *terraform.State) error { +func testAzureScimUserResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_scim_user" { @@ -178,7 +175,7 @@ func testScimUserResourceDestroy(s *terraform.State) error { return nil } -func testScimUserValues(t *testing.T, user *model.User, userName, displayName string, expectEntitlements []model.EntitlementsListItem, step string) resource.TestCheckFunc { +func testAzureScimUserValues(t *testing.T, user *model.User, userName, displayName string, expectEntitlements []model.EntitlementsListItem, step string) resource.TestCheckFunc { return func(s *terraform.State) error { var errorMsg bytes.Buffer @@ -206,7 +203,7 @@ func testScimUserValues(t *testing.T, user *model.User, userName, displayName st } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
-func testScimUserResourceExists(n string, user *model.User, t *testing.T) resource.TestCheckFunc { +func testAzureScimUserResourceExists(n string, user *model.User, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -228,7 +225,7 @@ func testScimUserResourceExists(n string, user *model.User, t *testing.T) resour } } -func testScimUserResourceCreate(username, displayName string) string { +func testAzureScimUserResourceCreate(username, displayName string) string { return fmt.Sprintf(` resource "databricks_scim_user" "my_scim_user" { user_name = "%s" @@ -241,7 +238,7 @@ func testScimUserResourceCreate(username, displayName string) string { `, username, displayName) } -func testScimUserResourceUpdate(username, displayName string) string { +func testAzureScimUserResourceUpdate(username, displayName string) string { return fmt.Sprintf(` resource "databricks_scim_user" "my_scim_user" { user_name = "%s" @@ -251,7 +248,7 @@ func testScimUserResourceUpdate(username, displayName string) string { `, username, displayName) } -func testScimUserResourceSetAdmin(username, displayName string, setAdmin bool) string { +func testAzureScimUserResourceSetAdmin(username, displayName string, setAdmin bool) string { return fmt.Sprintf(` resource "databricks_scim_user" "my_scim_user" { user_name = "%s" diff --git a/databricks/resource_databricks_secret_acl_test.go b/databricks/resource_databricks_secret_acl_aws_test.go similarity index 81% rename from databricks/resource_databricks_secret_acl_test.go rename to databricks/resource_databricks_secret_acl_aws_test.go index 9e78a70c2..de6f8f0ed 100644 --- a/databricks/resource_databricks_secret_acl_test.go +++ b/databricks/resource_databricks_secret_acl_aws_test.go @@ -11,7 +11,7 @@ import ( "testing" ) -func TestAccSecretAclResource(t *testing.T) { +func TestAccAwsSecretAclResource(t *testing.T) { //var secretScope model.Secre var 
secretACL model.ACLItem // generate a random name for each tokenInfo test run, to avoid @@ -24,19 +24,18 @@ func TestAccSecretAclResource(t *testing.T) { permission := "READ" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testSecretACLResourceDestroy, + CheckDestroy: testAwsSecretACLResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testSecretACLResource(scope, principal, permission), + Config: testAwsSecretACLResource(scope, principal, permission), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretACLResourceExists("databricks_secret_acl.my_secret_acl", &secretACL, t), + testAwsSecretACLResourceExists("databricks_secret_acl.my_secret_acl", &secretACL, t), // verify remote values - testSecretACLValues(t, &secretACL, permission, principal), + testAwsSecretACLValues(t, &secretACL, permission, principal), // verify local values resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "scope", scope), resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "principal", principal), @@ -50,13 +49,13 @@ func TestAccSecretAclResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testSecretACLResource(scope, principal, permission), + Config: testAwsSecretACLResource(scope, principal, permission), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretACLResourceExists("databricks_secret_acl.my_secret_acl", &secretACL, t), + testAwsSecretACLResourceExists("databricks_secret_acl.my_secret_acl", &secretACL, t), // verify remote values - testSecretACLValues(t, &secretACL, permission, principal), + 
testAwsSecretACLValues(t, &secretACL, permission, principal), // verify local values resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "scope", scope), resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "principal", principal), @@ -67,7 +66,7 @@ func TestAccSecretAclResource(t *testing.T) { }) } -func testSecretACLResourceDestroy(s *terraform.State) error { +func testAwsSecretACLResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_secret" && rs.Type != "databricks_secret_scope" { @@ -85,7 +84,7 @@ func testSecretACLResourceDestroy(s *terraform.State) error { return nil } -func testSecretACLValues(t *testing.T, acl *model.ACLItem, permission, principal string) resource.TestCheckFunc { +func testAwsSecretACLValues(t *testing.T, acl *model.ACLItem, permission, principal string) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, acl.Permission == model.ACLPermissionRead) assert.True(t, acl.Principal == principal) @@ -94,7 +93,7 @@ func testSecretACLValues(t *testing.T, acl *model.ACLItem, permission, principal } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
-func testSecretACLResourceExists(n string, aclItem *model.ACLItem, t *testing.T) resource.TestCheckFunc { +func testAwsSecretACLResourceExists(n string, aclItem *model.ACLItem, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -118,7 +117,7 @@ func testSecretACLResourceExists(n string, aclItem *model.ACLItem, t *testing.T) } // testAccTokenResource returns an configuration for an Example Widget with the provided name -func testSecretACLResource(scopeName, principal, permission string) string { +func testAwsSecretACLResource(scopeName, principal, permission string) string { return fmt.Sprintf(` resource "databricks_secret_scope" "my_scope" { name = "%s" diff --git a/databricks/resource_databricks_secret_acl_azure_test.go b/databricks/resource_databricks_secret_acl_azure_test.go new file mode 100644 index 000000000..2536a390c --- /dev/null +++ b/databricks/resource_databricks_secret_acl_azure_test.go @@ -0,0 +1,131 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAzureSecretAclResource(t *testing.T) { + //var secretScope model.Secre + var secretACL model.ACLItem + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + scope := "terraform_acc_test_acl" + principal := "users" + permission := "READ" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAzureSecretACLResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAzureSecretACLResource(scope, principal, permission), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretACLResourceExists("databricks_secret_acl.my_secret_acl", &secretACL, t), + // verify remote values + testAzureSecretACLValues(t, &secretACL, permission, principal), + // verify local values + resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "scope", scope), + resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "principal", principal), + resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "permission", permission), + ), + }, + { + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + err := client.SecretAcls().Delete(scope, principal) + assert.NoError(t, err, err) + }, + // use a dynamic configuration with the random name from above + Config: testAzureSecretACLResource(scope, principal, permission), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretACLResourceExists("databricks_secret_acl.my_secret_acl", &secretACL, t), + // verify remote values + testAzureSecretACLValues(t, &secretACL, permission, principal), + // verify local values + 
resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "scope", scope), + resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "principal", principal), + resource.TestCheckResourceAttr("databricks_secret_acl.my_secret_acl", "permission", permission), + ), + }, + }, + }) +} + +func testAzureSecretACLResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_secret" && rs.Type != "databricks_secret_scope" { + continue + } + _, err := client.SecretAcls().Read(rs.Primary.Attributes["scope"], rs.Primary.Attributes["principal"]) + if err == nil { + return errors.New("resource secret acl is not cleaned up") + } + _, err = client.SecretScopes().Read(rs.Primary.Attributes["scope"]) + if err == nil { + return errors.New("resource secret is not cleaned up") + } + } + return nil +} + +func testAzureSecretACLValues(t *testing.T, acl *model.ACLItem, permission, principal string) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, acl.Permission == model.ACLPermissionRead) + assert.True(t, acl.Principal == principal) + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
+func testAzureSecretACLResourceExists(n string, aclItem *model.ACLItem, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.SecretAcls().Read(rs.Primary.Attributes["scope"], rs.Primary.Attributes["principal"]) + //t.Log(resp) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *aclItem = resp + return nil + //return fmt.Errorf("Token (%s) not found", rs.Primary.ID) + } +} + +// testAccTokenResource returns an configuration for an Example Widget with the provided name +func testAzureSecretACLResource(scopeName, principal, permission string) string { + return fmt.Sprintf(` + resource "databricks_secret_scope" "my_scope" { + name = "%s" + } + resource "databricks_secret_acl" "my_secret_acl" { + principal = "%s" + permission = "%s" + scope = databricks_secret_scope.my_scope.name + } + `, scopeName, principal, permission) +} diff --git a/databricks/resource_databricks_secret_test.go b/databricks/resource_databricks_secret_aws_test.go similarity index 83% rename from databricks/resource_databricks_secret_test.go rename to databricks/resource_databricks_secret_aws_test.go index e94a69981..2fccc0dc2 100644 --- a/databricks/resource_databricks_secret_test.go +++ b/databricks/resource_databricks_secret_aws_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestAccSecretResource(t *testing.T) { +func TestAccAwsSecretResource(t *testing.T) { //var secretScope model.Secre var secret model.SecretMetadata // generate a random name for each tokenInfo test run, to avoid @@ -25,19 +25,18 @@ func TestAccSecretResource(t *testing.T) { stringValue := "my super secret key" resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testSecretResourceDestroy, + CheckDestroy: testAwsSecretResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testSecretResource(scope, key, stringValue), + Config: testAwsSecretResource(scope, key, stringValue), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretResourceExists("databricks_secret.my_secret", &secret, t), + testAwsSecretResourceExists("databricks_secret.my_secret", &secret, t), // verify remote values - testSecretValues(t, &secret, key), + testAwsSecretValues(t, &secret, key), // verify local values resource.TestCheckResourceAttr("databricks_secret.my_secret", "scope", scope), resource.TestCheckResourceAttr("databricks_secret.my_secret", "key", key), @@ -52,13 +51,13 @@ func TestAccSecretResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testSecretResource(scope, key, stringValue), + Config: testAwsSecretResource(scope, key, stringValue), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretResourceExists("databricks_secret.my_secret", &secret, t), + testAwsSecretResourceExists("databricks_secret.my_secret", &secret, t), // verify remote values - testSecretValues(t, &secret, key), + testAwsSecretValues(t, &secret, key), // verify local values resource.TestCheckResourceAttr("databricks_secret.my_secret", "scope", scope), resource.TestCheckResourceAttr("databricks_secret.my_secret", "key", key), @@ -73,13 +72,13 @@ func TestAccSecretResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testSecretResource(scope, 
key, stringValue), + Config: testAwsSecretResource(scope, key, stringValue), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretResourceExists("databricks_secret.my_secret", &secret, t), + testAwsSecretResourceExists("databricks_secret.my_secret", &secret, t), // verify remote values - testSecretValues(t, &secret, key), + testAwsSecretValues(t, &secret, key), // verify local values resource.TestCheckResourceAttr("databricks_secret.my_secret", "scope", scope), resource.TestCheckResourceAttr("databricks_secret.my_secret", "key", key), @@ -90,7 +89,7 @@ func TestAccSecretResource(t *testing.T) { }) } -func testSecretResourceDestroy(s *terraform.State) error { +func testAwsSecretResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_secret" && rs.Type != "databricks_secret_scope" { @@ -109,7 +108,7 @@ func testSecretResourceDestroy(s *terraform.State) error { return nil } -func testSecretValues(t *testing.T, secret *model.SecretMetadata, key string) resource.TestCheckFunc { +func testAwsSecretValues(t *testing.T, secret *model.SecretMetadata, key string) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, secret.Key == key) assert.True(t, secret.LastUpdatedTimestamp > 0) @@ -118,7 +117,7 @@ func testSecretValues(t *testing.T, secret *model.SecretMetadata, key string) re } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
-func testSecretResourceExists(n string, secret *model.SecretMetadata, t *testing.T) resource.TestCheckFunc { +func testAwsSecretResourceExists(n string, secret *model.SecretMetadata, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -142,7 +141,7 @@ func testSecretResourceExists(n string, secret *model.SecretMetadata, t *testing } // testAccTokenResource returns an configuration for an Example Widget with the provided name -func testSecretResource(scopeName, key, value string) string { +func testAwsSecretResource(scopeName, key, value string) string { return fmt.Sprintf(` resource "databricks_secret_scope" "my_scope" { name = "%s" diff --git a/databricks/resource_databricks_secret_azure_test.go b/databricks/resource_databricks_secret_azure_test.go new file mode 100644 index 000000000..9a759694a --- /dev/null +++ b/databricks/resource_databricks_secret_azure_test.go @@ -0,0 +1,155 @@ +package databricks + +import ( + "errors" + "fmt" + "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" +) + +func TestAccAzureSecretResource(t *testing.T) { + //var secretScope model.Secre + var secret model.SecretMetadata + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + scope := "terraform_acc_test_secret" + key := "my_cool_key" + stringValue := "my super secret key" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAzureSecretResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAzureSecretResource(scope, key, stringValue), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretResourceExists("databricks_secret.my_secret", &secret, t), + // verify remote values + testAzureSecretValues(t, &secret, key), + // verify local values + resource.TestCheckResourceAttr("databricks_secret.my_secret", "scope", scope), + resource.TestCheckResourceAttr("databricks_secret.my_secret", "key", key), + resource.TestCheckResourceAttr("databricks_secret.my_secret", "string_value", stringValue), + ), + }, + { + //Deleting and recreating the secret + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + err := client.Secrets().Delete(scope, secret.Key) + assert.NoError(t, err, err) + }, + // use a dynamic configuration with the random name from above + Config: testAzureSecretResource(scope, key, stringValue), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretResourceExists("databricks_secret.my_secret", &secret, t), + // verify remote values + testAzureSecretValues(t, &secret, key), + // verify local values + resource.TestCheckResourceAttr("databricks_secret.my_secret", "scope", scope), + 
resource.TestCheckResourceAttr("databricks_secret.my_secret", "key", key), + resource.TestCheckResourceAttr("databricks_secret.my_secret", "string_value", stringValue), + ), + }, + { + //Deleting the scope should recreate the secret + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + err := client.SecretScopes().Delete(scope) + assert.NoError(t, err, err) + }, + // use a dynamic configuration with the random name from above + Config: testAzureSecretResource(scope, key, stringValue), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretResourceExists("databricks_secret.my_secret", &secret, t), + // verify remote values + testAzureSecretValues(t, &secret, key), + // verify local values + resource.TestCheckResourceAttr("databricks_secret.my_secret", "scope", scope), + resource.TestCheckResourceAttr("databricks_secret.my_secret", "key", key), + resource.TestCheckResourceAttr("databricks_secret.my_secret", "string_value", stringValue), + ), + }, + }, + }) +} + +func testAzureSecretResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_secret" && rs.Type != "databricks_secret_scope" { + continue + } + _, err := client.Secrets().Read(rs.Primary.Attributes["scope"], rs.Primary.Attributes["key"]) + if err == nil { + return errors.New("resource secret is not cleaned up") + } + _, err = client.SecretScopes().Read(rs.Primary.Attributes["scope"]) + if err == nil { + return errors.New("resource secret is not cleaned up") + } + + } + return nil +} + +func testAzureSecretValues(t *testing.T, secret *model.SecretMetadata, key string) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, secret.Key == key) + assert.True(t, secret.LastUpdatedTimestamp > 0) + return nil + } +} + +// 
testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAzureSecretResourceExists(n string, secret *model.SecretMetadata, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.Secrets().Read(rs.Primary.Attributes["scope"], rs.Primary.Attributes["key"]) + //t.Log(resp) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *secret = resp + return nil + //return fmt.Errorf("Token (%s) not found", rs.Primary.ID) + } +} + +// testAccTokenResource returns an configuration for an Example Widget with the provided name +func testAzureSecretResource(scopeName, key, value string) string { + return fmt.Sprintf(` + resource "databricks_secret_scope" "my_scope" { + name = "%s" + } + resource "databricks_secret" "my_secret" { + key = "%s" + string_value = "%s" + scope = databricks_secret_scope.my_scope.name + } + `, scopeName, key, value) +} diff --git a/databricks/resource_databricks_secret_scope_test.go b/databricks/resource_databricks_secret_scope_aws_test.go similarity index 80% rename from databricks/resource_databricks_secret_scope_test.go rename to databricks/resource_databricks_secret_scope_aws_test.go index 2dad8d270..2379c15c2 100644 --- a/databricks/resource_databricks_secret_scope_test.go +++ b/databricks/resource_databricks_secret_scope_aws_test.go @@ -11,7 +11,7 @@ import ( "testing" ) -func TestAccSecretScopeResource(t *testing.T) { +func TestAccAwsSecretScopeResource(t *testing.T) { var secretScope model.SecretScope // generate a random name for each tokenInfo test run, to avoid @@ -22,19 +22,18 @@ func TestAccSecretScopeResource(t *testing.T) { scope := 
"terraform_acc_test_scope" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testSecretScopeResourceDestroy, + CheckDestroy: testAwsSecretScopeResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testSecretScopeResource(scope), + Config: testAwsSecretScopeResource(scope), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretScopeResourceExists("databricks_secret_scope.my_scope", &secretScope, t), + testAwsSecretScopeResourceExists("databricks_secret_scope.my_scope", &secretScope, t), // verify remote values - testSecretScopeValues(t, &secretScope, scope), + testAwsSecretScopeValues(t, &secretScope, scope), // verify local values resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "name", scope), resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "backend_type", string(model.ScopeBackendTypeDatabricks)), @@ -47,13 +46,13 @@ func TestAccSecretScopeResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testSecretScopeResource(scope), + Config: testAwsSecretScopeResource(scope), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testSecretScopeResourceExists("databricks_secret_scope.my_scope", &secretScope, t), + testAwsSecretScopeResourceExists("databricks_secret_scope.my_scope", &secretScope, t), // verify remote values - testSecretScopeValues(t, &secretScope, scope), + testAwsSecretScopeValues(t, &secretScope, scope), // verify local values resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "name", scope), resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "backend_type", 
string(model.ScopeBackendTypeDatabricks)), @@ -63,7 +62,7 @@ func TestAccSecretScopeResource(t *testing.T) { }) } -func testSecretScopeResourceDestroy(s *terraform.State) error { +func testAwsSecretScopeResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_secret_scope" { @@ -78,7 +77,7 @@ func testSecretScopeResourceDestroy(s *terraform.State) error { return nil } -func testSecretScopeValues(t *testing.T, secretScope *model.SecretScope, scope string) resource.TestCheckFunc { +func testAwsSecretScopeValues(t *testing.T, secretScope *model.SecretScope, scope string) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, secretScope.Name == scope) assert.True(t, secretScope.BackendType == model.ScopeBackendTypeDatabricks) @@ -87,7 +86,7 @@ func testSecretScopeValues(t *testing.T, secretScope *model.SecretScope, scope s } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
-func testSecretScopeResourceExists(n string, secretScope *model.SecretScope, t *testing.T) resource.TestCheckFunc { +func testAwsSecretScopeResourceExists(n string, secretScope *model.SecretScope, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -111,7 +110,7 @@ func testSecretScopeResourceExists(n string, secretScope *model.SecretScope, t * } // testAccTokenResource returns an configuration for an Example Widget with the provided name -func testSecretScopeResource(scopeName string) string { +func testAwsSecretScopeResource(scopeName string) string { return fmt.Sprintf(` resource "databricks_secret_scope" "my_scope" { name = "%s" diff --git a/databricks/resource_databricks_secret_scope_azure_test.go b/databricks/resource_databricks_secret_scope_azure_test.go new file mode 100644 index 000000000..004034f67 --- /dev/null +++ b/databricks/resource_databricks_secret_scope_azure_test.go @@ -0,0 +1,119 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAzureSecretScopeResource(t *testing.T) { + var secretScope model.SecretScope + + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + scope := "terraform_acc_test_scope" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAzureSecretScopeResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAzureSecretScopeResource(scope), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretScopeResourceExists("databricks_secret_scope.my_scope", &secretScope, t), + // verify remote values + testAzureSecretScopeValues(t, &secretScope, scope), + // verify local values + resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "name", scope), + resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "backend_type", string(model.ScopeBackendTypeDatabricks)), + ), + }, + { + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + err := client.SecretScopes().Delete(scope) + assert.NoError(t, err, err) + }, + // use a dynamic configuration with the random name from above + Config: testAzureSecretScopeResource(scope), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureSecretScopeResourceExists("databricks_secret_scope.my_scope", &secretScope, t), + // verify remote values + testAzureSecretScopeValues(t, &secretScope, scope), + // verify local values + resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "name", scope), + resource.TestCheckResourceAttr("databricks_secret_scope.my_scope", "backend_type", string(model.ScopeBackendTypeDatabricks)), + ), + }, + }, + }) +} + +func 
testAzureSecretScopeResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_secret_scope" { + continue + } + _, err := client.Tokens().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("resource token is not cleaned up") + } + return nil +} + +func testAzureSecretScopeValues(t *testing.T, secretScope *model.SecretScope, scope string) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, secretScope.Name == scope) + assert.True(t, secretScope.BackendType == model.ScopeBackendTypeDatabricks) + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAzureSecretScopeResourceExists(n string, secretScope *model.SecretScope, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.SecretScopes().Read(rs.Primary.ID) + //t.Log(resp) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *secretScope = resp + return nil + //return fmt.Errorf("Token (%s) not found", rs.Primary.ID) + } +} + +// testAccTokenResource returns an configuration for an Example Widget with the provided name +func testAzureSecretScopeResource(scopeName string) string { + return fmt.Sprintf(` + resource "databricks_secret_scope" "my_scope" { + name = "%s" + } + `, scopeName) +} diff --git a/databricks/resource_databricks_token_test.go b/databricks/resource_databricks_token_aws_test.go similarity index 75% rename from databricks/resource_databricks_token_test.go rename to databricks/resource_databricks_token_aws_test.go 
index 37bb69d83..917155911 100644 --- a/databricks/resource_databricks_token_test.go +++ b/databricks/resource_databricks_token_aws_test.go @@ -12,7 +12,7 @@ import ( "testing" ) -func TestAccTokenResource(t *testing.T) { +func TestAccAwsTokenResource(t *testing.T) { var tokenInfo model.TokenInfo // generate a random name for each tokenInfo test run, to avoid @@ -22,19 +22,18 @@ func TestAccTokenResource(t *testing.T) { rComment := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckTokenResourceDestroy, + CheckDestroy: testAccAwsCheckTokenResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testAccTokenResource(rComment), + Config: testAccAwsTokenResource(rComment), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testAccCheckTokenResourceExists("databricks_token.my-token", &tokenInfo, t), + testAccAwsCheckTokenResourceExists("databricks_token.my-token", &tokenInfo, t), // verify remote values - testAccCheckTokenValues(&tokenInfo, rComment), + testAccAwsCheckTokenValues(&tokenInfo, rComment), // verify local values resource.TestCheckResourceAttr("databricks_token.my-token", "lifetime_seconds", "6000"), resource.TestCheckResourceAttr("databricks_token.my-token", "comment", rComment), @@ -48,13 +47,13 @@ func TestAccTokenResource(t *testing.T) { assert.NoError(t, err, err) }, // use a dynamic configuration with the random name from above - Config: testAccTokenResource(rComment), + Config: testAccAwsTokenResource(rComment), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testAccCheckTokenResourceExists("databricks_token.my-token", &tokenInfo, t), + 
testAccAwsCheckTokenResourceExists("databricks_token.my-token", &tokenInfo, t), // verify remote values - testAccCheckTokenValues(&tokenInfo, rComment), + testAccAwsCheckTokenValues(&tokenInfo, rComment), // verify local values resource.TestCheckResourceAttr("databricks_token.my-token", "lifetime_seconds", "6000"), resource.TestCheckResourceAttr("databricks_token.my-token", "comment", rComment), @@ -64,7 +63,7 @@ func TestAccTokenResource(t *testing.T) { }) } -func testAccCheckTokenResourceDestroy(s *terraform.State) error { +func testAccAwsCheckTokenResourceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_token" { @@ -79,10 +78,7 @@ func testAccCheckTokenResourceDestroy(s *terraform.State) error { return nil } -func testAccPreCheck(t *testing.T) { -} - -func testAccCheckTokenValues(tokenInfo *model.TokenInfo, comment string) resource.TestCheckFunc { +func testAccAwsCheckTokenValues(tokenInfo *model.TokenInfo, comment string) resource.TestCheckFunc { return func(s *terraform.State) error { if tokenInfo.Comment != comment { return errors.New("the comment for the token created does not equal the value passed in") @@ -91,8 +87,8 @@ func testAccCheckTokenValues(tokenInfo *model.TokenInfo, comment string) resourc } } -// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. -func testAccCheckTokenResourceExists(n string, tokenInfo *model.TokenInfo, t *testing.T) resource.TestCheckFunc { +// testAccAwsCheckTokenResourceExists queries the API and retrieves the matching Widget. 
+func testAccAwsCheckTokenResourceExists(n string, tokenInfo *model.TokenInfo, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -114,8 +110,8 @@ func testAccCheckTokenResourceExists(n string, tokenInfo *model.TokenInfo, t *te } } -// testAccTokenResource returns an configuration for an Example Widget with the provided name -func testAccTokenResource(comment string) string { +// testAccAwsTokenResource returns an configuration for an Example Widget with the provided name +func testAccAwsTokenResource(comment string) string { return fmt.Sprintf(` resource "databricks_token" "my-token" { lifetime_seconds = 6000 diff --git a/databricks/resource_databricks_token_azure_test.go b/databricks/resource_databricks_token_azure_test.go new file mode 100644 index 000000000..bb2c6c70b --- /dev/null +++ b/databricks/resource_databricks_token_azure_test.go @@ -0,0 +1,121 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAzureTokenResource(t *testing.T) { + var tokenInfo model.TokenInfo + + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + rComment := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccAzureCheckTokenResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAccAzureTokenResource(rComment), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAccAzureCheckTokenResourceExists("databricks_token.my-token", &tokenInfo, t), + // verify remote values + testAccAzureCheckTokenValues(&tokenInfo, rComment), + // verify local values + resource.TestCheckResourceAttr("databricks_token.my-token", "lifetime_seconds", "6000"), + resource.TestCheckResourceAttr("databricks_token.my-token", "comment", rComment), + ), + }, + { + //Deleting and recreating the token + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + err := client.Tokens().Delete(tokenInfo.TokenID) + assert.NoError(t, err, err) + }, + // use a dynamic configuration with the random name from above + Config: testAccAzureTokenResource(rComment), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAccAzureCheckTokenResourceExists("databricks_token.my-token", &tokenInfo, t), + // verify remote values + testAccAzureCheckTokenValues(&tokenInfo, rComment), + // verify local values + resource.TestCheckResourceAttr("databricks_token.my-token", "lifetime_seconds", "6000"), + resource.TestCheckResourceAttr("databricks_token.my-token", "comment", rComment), + ), + }, + }, + }) +} + +func testAccAzureCheckTokenResourceDestroy(s *terraform.State) error { + conn := 
testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_token" { + continue + } + _, err := conn.Tokens().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("resource token is not cleaned up") + } + return nil +} + +func testAccAzureCheckTokenValues(tokenInfo *model.TokenInfo, comment string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if tokenInfo.Comment != comment { + return errors.New("the comment for the token created does not equal the value passed in") + } + return nil + } +} + +// testAccAzureCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAccAzureCheckTokenResourceExists(n string, tokenInfo *model.TokenInfo, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.Tokens().Read(rs.Primary.ID) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *tokenInfo = resp + return nil + //return fmt.Errorf("Token (%s) not found", rs.Primary.ID) + } +} + +// testAccAzureTokenResource returns an configuration for an Example Widget with the provided name +func testAccAzureTokenResource(comment string) string { + return fmt.Sprintf(` + resource "databricks_token" "my-token" { + lifetime_seconds = 6000 + comment = "%v" + } + `, comment) +} diff --git a/databricks/utils.go b/databricks/utils.go index 64f223770..6684aeed8 100644 --- a/databricks/utils.go +++ b/databricks/utils.go @@ -68,3 +68,27 @@ func changeClusterIntoRunningState(clusterID string, client *service.DBApiClient return fmt.Errorf("cluster is in a non recoverable state: %s", currentState) } + +// 
PackagedMWSIds is a struct that contains both the MWS acct id and the ResourceId (resources are networks, creds, etc.)
+type PackagedMWSIds struct {
+	MwsAcctId  string
+	ResourceId string
+}
+
+// Helps package up MWSAccountId with another id such as credentials id or network id
+// uses format mwsAcctId/otherId
+func packMWSAccountId(idsToPackage PackagedMWSIds) string {
+	return fmt.Sprintf("%s/%s", idsToPackage.MwsAcctId, idsToPackage.ResourceId)
+}
+
+// Helps unpackage MWSAccountId from another id such as credentials id or network id
+func unpackMWSAccountId(combined string) (PackagedMWSIds, error) {
+	var packagedMWSIds PackagedMWSIds
+	parts := strings.Split(combined, "/")
+	if len(parts) != 2 {
+		return packagedMWSIds, fmt.Errorf("unpacked account has more than or less than two parts, combined id: %s", combined)
+	}
+	packagedMWSIds.MwsAcctId = parts[0]
+	packagedMWSIds.ResourceId = parts[1]
+	return packagedMWSIds, nil
+}
diff --git a/integration-environment-mws/README.MD b/integration-environment-mws/README.MD
new file mode 100644
index 000000000..ec2a1b43e
--- /dev/null
+++ b/integration-environment-mws/README.MD
@@ -0,0 +1,37 @@
+# Integration Testing on AWS with MWS
+
+The `run.sh` script will setup the required resources in AWS defined in `prereqs.tf` and then pass these as environment variables to the `golang` integration tests.
+
+Once the tests have finished the `run.sh` will attempt to cleanup resources unless an environment variable of `SKIP_CLEANUP` is set.
+
+> Note: Recreating the resources each run is a good practice when running the tests as it ensures that past runs haven't made changes which affect future tests. For a quicker loop when debugging see the `Debugging` section.
+
+*Requirements*
+- `.env` file at root of project is set with a SP which has ability to assign roles (easiest to set OWNER on sub)
+- `terraform` installed
+- AWS_ACCESS_KEY_ID environment variable set
+- AWS_SECRET_ACCESS_KEY environment variable set
+- DATABRICKS_MWS_AWS_REGION environment variable set
+- DATABRICKS_MWS_ACCT_ID environment variable set
+- DATABRICKS_MWS_USERNAME environment variable set
+- DATABRICKS_MWS_PASSWORD environment variable set
+- DATABRICKS_MWS_HOST environment variable set
+
+## Debugging
+
+If you want to run in a tighter loop without waiting on resource creation each time you invoke the tests you can use the `SKIP_CLEANUP` env like so:
+
+```
+export SKIP_CLEANUP=true
+export AWS_ACCESS_KEY_ID=...
+export AWS_SECRET_ACCESS_KEY=...
+export DATABRICKS_MWS_AWS_REGION=...
+export DATABRICKS_MWS_ACCT_ID=...
+export DATABRICKS_MWS_USERNAME=...
+export DATABRICKS_MWS_PASSWORD=...
+export DATABRICKS_MWS_HOST=...
+integration-environment-mws/run.sh
+```
+
+In this case the same workspace, storage and other pre-reqs will be used each run.
+ -var 'databricks_mws_acct_id=$DATABRICKS_MWS_ACCT_ID' \ No newline at end of file diff --git a/integration-environment-mws/prereqs.tf b/integration-environment-mws/prereqs.tf new file mode 100644 index 000000000..b0124d09c --- /dev/null +++ b/integration-environment-mws/prereqs.tf @@ -0,0 +1,171 @@ +variable "databricks_mws_aws_acct_id" { + type = string +} + +variable "databricks_mws_acct_id" { + type = string +} + +provider "aws" { +} + +provider "random" { + version = "~> 2.2" +} + +resource "random_string" "naming" { + special = false + upper = false + length = 6 +} + +data "template_file" "cross_account_role_policy" { + template = "${file("${path.module}/templates/cross_account_role_policy.tpl")}" +} + +data "template_file" "cross_account_role_assume_policy" { + template = "${file("${path.module}/templates/cross_account_role_assume_policy.tpl")}" + vars = { + databricks_app_external_id = var.databricks_mws_acct_id + databricks_aws_account_id = var.databricks_mws_aws_acct_id + } +} + +resource "aws_iam_role" "cross_account_role" { + name = "tf_test_cross_acct_role_${random_string.naming.result}" + assume_role_policy = data.template_file.cross_account_role_assume_policy.rendered +} + +resource "aws_iam_policy" "cross_account_role_policy" { + name = "tf_test_cross_acct_role_${random_string.naming.result}_policy" + description = "E2 Workspace Cross account role policy policy" + policy = data.template_file.cross_account_role_policy.rendered +} + +resource "aws_iam_role_policy_attachment" "cross_account_role_policy_attach" { + role = aws_iam_role.cross_account_role.name + policy_arn = aws_iam_policy.cross_account_role_policy.arn +} + +data "template_file" "storage_bucket_policy" { + template = "${file("${path.module}/templates/storage_bucket_policy.tpl")}" + vars = { + bucket_name = aws_s3_bucket.root_storage_bucket.bucket + databricks_aws_account_id = var.databricks_mws_aws_acct_id + } +} + + +resource "aws_s3_bucket" "root_storage_bucket" { + bucket = 
"tf-test-root-bucket-${random_string.naming.result}" + acl = "private" + versioning { + enabled = false + } + force_destroy = true + + tags = { + Name = "tf-test-bucket" + Environment = "Dev" + Owner = "test@databricks.com" + } +} + + +resource "aws_s3_bucket_policy" "root_bucket_policy" { + bucket = aws_s3_bucket.root_storage_bucket.id + policy = data.template_file.storage_bucket_policy.rendered +} + +resource "aws_vpc" "main" { + cidr_block = "10.0.0.0/16" + enable_dns_hostnames = true + + tags = { + Name = "tf-test-mws-vpc" + } +} + +resource "aws_subnet" "public" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1b" + + tags = { + Name = "public-subnet" + } +} + +resource "aws_subnet" "private" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.2.0/24" + availability_zone = "us-east-1a" + tags = { + Name = "private-subnet" + } +} + +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.main.id + tags = { + Name = "test-igw" + } +} + +resource "aws_route" "r" { + route_table_id = aws_vpc.main.default_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + + depends_on = [aws_internet_gateway.gw, aws_vpc.main] +} + +resource "aws_security_group" "test_sg" { + name = "all all" + description = "Allow inbound traffic" + vpc_id = aws_vpc.main.id + + ingress { + description = "All" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "test_sg" + } +} + + +output "aws_s3_bucket_name" { + value = aws_s3_bucket.root_storage_bucket.bucket +} + +output "aws_cross_acct_role_arn" { + value = aws_iam_role.cross_account_role.arn +} + +output "aws_vpc_id" { + value = aws_vpc.main.id +} + +output "aws_subnet_1" { + value = aws_subnet.public.id +} + +output "aws_subnet_2" { + value = aws_subnet.private.id +} + +output "aws_sg" { + value = 
aws_security_group.test_sg.id +} diff --git a/integration-environment-mws/run.sh b/integration-environment-mws/run.sh new file mode 100755 index 000000000..e1717a0f3 --- /dev/null +++ b/integration-environment-mws/run.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -e +cd $(dirname "$0") + +export AWS_DEFAULT_REGION=$DATABRICKS_MWS_AWS_REGION +export DATABRICKS_USERNAME=$DATABRICKS_MWS_USERNAME +export DATABRICKS_PASSWORD=$DATABRICKS_MWS_PASSWORD + +echo "Working in region: -- $AWS_DEFAULT_REGION" + + +function cleanup() +{ + echo -e "----> Destroy prereqs \n\n" + if [ -z "$SKIP_CLEANUP" ] + then + terraform destroy -auto-approve \ + -var 'databricks_mws_aws_acct_id=414351767826' \ + -var 'databricks_mws_acct_id=$DATABRICKS_MWS_ACCT_ID' + else + echo "\$SKIP_CLEANUP is set so 'terraform destroy' not run. Warning: Resources left in aws account." + fi +} +trap cleanup EXIT + +echo -e "----> Running Terraform to create prereqs in AWS Account for MWS \n\n" + +# Remove any old state unless SKIP_CLEANUP set +if [ -z "$SKIP_CLEANUP" ] +then + echo "\$SKIP_CLEANUP isn't set so removing any pre-existing terraform state" + rm -f *.tfstate +fi + +terraform init +terraform apply -auto-approve \ + -var "databricks_mws_aws_acct_id=414351767826" \ + -var "databricks_mws_acct_id=$DATABRICKS_MWS_ACCT_ID" + +export TEST_MWS_CROSS_ACCT_ROLE=$(terraform output aws_cross_acct_role_arn) +export TEST_MWS_ROOT_BUCKET=$(terraform output aws_s3_bucket_name) + +export TEST_MWS_VPC_ID=$(terraform output aws_vpc_id) +export TEST_MWS_SUBNET_1=$(terraform output aws_subnet_1) +export TEST_MWS_SUBNET_2=$(terraform output aws_subnet_2) +export TEST_MWS_SG=$(terraform output aws_sg) + + +echo -e "----> Running AWS Multiple Workspaces Acceptance Tests \n\n" +# Output debug log to file while tests run +#export TF_LOG_PATH=$PWD/tf.log +# Run all AWS Multipleworkspace integration tests +TF_ACC=1 gotestsum --format short-verbose --raw-command go test -v -json -short -coverprofile=coverage.out -test.timeout 35m 
-run 'TestAccMWS' ./../... \ No newline at end of file diff --git a/integration-environment-mws/templates/cross_account_role_assume_policy.tpl b/integration-environment-mws/templates/cross_account_role_assume_policy.tpl new file mode 100644 index 000000000..14fddf6bc --- /dev/null +++ b/integration-environment-mws/templates/cross_account_role_assume_policy.tpl @@ -0,0 +1,17 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::${databricks_aws_account_id}:root" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "${databricks_app_external_id}" + } + } + } + ] +} \ No newline at end of file diff --git a/integration-environment-mws/templates/cross_account_role_policy.tpl b/integration-environment-mws/templates/cross_account_role_policy.tpl new file mode 100644 index 000000000..055661163 --- /dev/null +++ b/integration-environment-mws/templates/cross_account_role_policy.tpl @@ -0,0 +1,81 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1403287045000", + "Effect": "Allow", + "Action": [ + "ec2:AssociateDhcpOptions", + "ec2:AssociateIamInstanceProfile", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateDhcpOptions", + "ec2:CreateInternetGateway", + "ec2:CreateKeyPair", + "ec2:CreateRoute", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVpc", + "ec2:DeleteInternetGateway", + "ec2:DeleteKeyPair", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DeleteVpc", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkAcls", + "ec2:DescribeInternetGateways", + "ec2:DescribeVpcAttribute", + "ec2:DescribeIamInstanceProfileAssociations", + 
"ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcs", + "ec2:DetachInternetGateway", + "ec2:DisassociateIamInstanceProfile", + "ec2:ModifyVpcAttribute", + "ec2:ReplaceIamInstanceProfileAssociation", + "ec2:RequestSpotInstances", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "ec2:CreatePlacementGroup", + "ec2:DeletePlacementGroup", + "ec2:DescribePlacementGroups" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + } + ] +} \ No newline at end of file diff --git a/integration-environment-mws/templates/storage_bucket_policy.tpl b/integration-environment-mws/templates/storage_bucket_policy.tpl new file mode 100644 index 000000000..0d842fcdd --- /dev/null +++ b/integration-environment-mws/templates/storage_bucket_policy.tpl @@ -0,0 +1,24 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Grant Databricks Access", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::${databricks_aws_account_id}:root" + }, + "Action": [ + "s3:GetObject", + "s3:GetObjectVersion", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": [ + "arn:aws:s3:::${bucket_name}/*", + "arn:aws:s3:::${bucket_name}" + ] + } + ] +} From a21b81523d8fee0dc42d67bb76bf7f4adc34a2fe Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 17:35:56 -0400 Subject: [PATCH 05/10] added documentation for mws 
resources --- website/content/Provider/_index.md | 29 +++- website/content/Resources/mws_credentials.md | 77 ++++++++++ website/content/Resources/mws_networks.md | 105 ++++++++++++++ .../Resources/mws_storage_configurations.md | 74 ++++++++++ website/content/Resources/mws_workspaces.md | 137 ++++++++++++++++++ 5 files changed, 419 insertions(+), 3 deletions(-) create mode 100644 website/content/Resources/mws_credentials.md create mode 100644 website/content/Resources/mws_networks.md create mode 100644 website/content/Resources/mws_storage_configurations.md create mode 100644 website/content/Resources/mws_workspaces.md diff --git a/website/content/Provider/_index.md b/website/content/Provider/_index.md index 4a3b86ccc..b3d0f7db8 100644 --- a/website/content/Provider/_index.md +++ b/website/content/Provider/_index.md @@ -31,8 +31,10 @@ resource "databricks_scim_user" "my-user" { ``` hcl provider "databricks" { host = "http://databricks.domain.com" - token = base64encode("${var.user}:${var.password}") - auth_type = "BASIC" + basic_auth { + username = var.user + password = var.password + } } resource "databricks_scim_user" "my-user" { @@ -137,6 +139,8 @@ The following variables can be passed via environment variables: * `host` → `DATABRICKS_HOST` * `token` → `DATABRICKS_TOKEN` +* `basic_auth.username` → `DATABRICKS_USERNAME` +* `basic_auth.password` → `DATABRICKS_PASSWORD` * `managed_resource_group` → `DATABRICKS_AZURE_MANAGED_RESOURCE_GROUP` * `azure_region` → `AZURE_REGION` * `workspace_name` → `DATABRICKS_AZURE_WORKSPACE_NAME` @@ -172,7 +176,26 @@ Alternatively you can provide this value as an environment variable `DATABRICKS_ > This is the api token to authenticate into the workspace. Alternatively you can provide this value as an environment variable `DATABRICKS_TOKEN`. 
-#### `azure-auth`: +#### `basic_auth`: +> #### **Usage** +>```hcl +>basic_auth = { +> username = "user" +> password = "mypass-123" +>} +>``` +> {{%chevron default=`This is the authentication required to authenticate to the Databricks via basic auth through a user + that has access to the workspace. This is optional as you can use the api token based auth. +The basic_auth block contains the following arguments:` display="true" %}} + +* `username` - This is the username of the user that can log into the workspace. +Alternatively you can provide this value as an environment variable `DATABRICKS_USERNAME`. + +* `password` - This is the password of the user that can log into the workspace. +Alternatively you can provide this value as an environment variable `DATABRICKS_PASSWORD`. +{{% /chevron %}} + +#### `azure_auth`: > #### **Usage** >```hcl >azure_auth = { diff --git a/website/content/Resources/mws_credentials.md b/website/content/Resources/mws_credentials.md new file mode 100644 index 000000000..b87912ca9 --- /dev/null +++ b/website/content/Resources/mws_credentials.md @@ -0,0 +1,77 @@ ++++ +title = "mws_credentials" +date = 2020-04-20T23:34:03-04:00 +weight = 15 +chapter = false +pre = "" ++++ + + +## Resource: `mws_credentials` + +This resource to configure the credentials for the multiple workspaces api. + +{{% notice warning %}} +It is important to understand that this will require you to configure your provider separately for the +multiple workspaces resources +{{% /notice %}} + +{{% notice note %}} +This will point to https://accounts.cloud.databricks.com for the HOST and it will use basic auth +as that is the only authentication method available for multiple workspaces api. 
+{{% /notice %}} + + +## Example Usage + +````hcl +provider "databricks" { + host = "https://accounts.cloud.databricks.com" + basic_auth { + username = "username" + password = "password" + } +} +resource "databricks_mws_credentials" "my_mws_credentials" { + account_id = "my-mws-account-id" + credentials_name = "my-custom-credentials" + role_arn = "arn:aws:iam::9999999999999:role/my-custom-cross-account-role" +} +```` +## Argument Reference + +The following arguments are supported: + +#### - `account_id`: +> **(Required)** Databricks multi-workspace master account ID. + +#### - `credentials_name`: +> **(Required)** The human-readable name of the credential configuration object. + +#### - `role_arn`: +> **(Required)** This is the ARN of the cross account role. + + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +#### - `id`: +> The id of the resource which follows the format accountId/credentialsId. + +#### - `creation_time`: +> Time in epoch milliseconds when the credential was created. + +#### - `external_id`: +> The external ID that needs to be trusted by the cross-account role. This is always the account_id, which is your Databricks multi-workspace master account ID. + +#### - `credentials_id`: +> Databricks credential configuration ID. + + + +## Import + +{{% notice note %}} +Importing this resource is not currently supported. +{{% /notice %}} diff --git a/website/content/Resources/mws_networks.md b/website/content/Resources/mws_networks.md new file mode 100644 index 000000000..dbacb20c4 --- /dev/null +++ b/website/content/Resources/mws_networks.md @@ -0,0 +1,105 @@ ++++ +title = "mws_networks" +date = 2020-04-20T23:34:03-04:00 +weight = 15 +chapter = false +pre = "" ++++ + + +## Resource: `mws_networks` + +This resource to configure the vpc for the multiple workspaces api if the BYOVPC option is chosen.
+ +{{% notice warning %}} +It is important to understand that this will require you to configure your provider separately for the +multiple workspaces resources +{{% /notice %}} + +{{% notice note %}} +This will point to https://accounts.cloud.databricks.com for the HOST and it will use basic auth +as that is the only authentication method available for multiple workspaces api. +{{% /notice %}} + + +## Example Usage + +````hcl +provider "databricks" { + host = "https://accounts.cloud.databricks.com" + basic_auth { + username = "username" + password = "password" + } +} +resource "databricks_mws_networks" "my_network" { + account_id = "my-mws-acct-id" + network_name = "my-custom-network-config-name" + vpc_id = "my-aws-vpc-id" + subnet_ids = [ + "my-first-subnet", + "my-second-subnet", + ] + security_group_ids = [ + "my-security-group-1", + ] +} +```` +## Argument Reference + +The following arguments are supported: + +#### - `account_id`: +> **(Required)** Databricks multi-workspace master account ID. + +#### - `network_name`: +> **(Required)** The human-readable name of the network configuration. + +#### - `vpc_id`: +> **(Required)** The ID of the VPC associated with this network. VPC IDs can be used in multiple network configurations. + +#### - `subnet_ids`: +> **(Required)** IDs of at least 2 subnets associated with this network. +>Subnet IDs cannot be used in multiple network configurations. + +#### - `security_group_ids`: +> **(Required)** IDs of 1 to 5 security groups associated with this network. +>Security groups IDs cannot be used in multiple network configurations. + + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +#### - `id`: +> The id of the resource which follows the format accountId/networksId. + +#### - `vpc_status`: +> Enum: "UNATTACHED" "VALID" "BROKEN" "WARNED" +> The status of this network configuration object in terms of its use in a workspace: +> +> UNATTACHED — Unattached. 
+> VALID — Valid. +> BROKEN — Broken. +> WARNED — Warned. + +#### - `error_messages`: +> Array of error messages about the network configuration. +> Contains the following objects: +> error_type: The AWS resource associated with this error: credentials, VPC, subnet, security group, or network ACL. +> error_message: Details of the error. + +#### - `workspace_id`: +> Workspace ID associated with this network configuration. Can be empty. + +#### - `creation_time`: +> Time in epoch milliseconds when the network was created. + +#### - `network_id`: +> The Databricks network configuration ID. + +## Import + +{{% notice note %}} +Importing this resource is not currently supported. +{{% /notice %}} diff --git a/website/content/Resources/mws_storage_configurations.md b/website/content/Resources/mws_storage_configurations.md new file mode 100644 index 000000000..2f9249016 --- /dev/null +++ b/website/content/Resources/mws_storage_configurations.md @@ -0,0 +1,74 @@ ++++ +title = "multiworkspace_storage_configurations" +date = 2020-04-20T23:34:03-04:00 +weight = 15 +chapter = false +pre = "" ++++ + + +## Resource: `multiworkspace_storage_configurations` + +This resource to configure the root bucket for the multiple workspaces api. + +{{% notice warning %}} +It is important to understand that this will require you to configure your provider separately for the +multiple workspaces resources +{{% /notice %}} + +{{% notice note %}} +This will point to https://accounts.cloud.databricks.com for the HOST and it will use basic auth +as that is the only authentication method available for multiple workspaces api. 
+{{% /notice %}} + + +## Example Usage + +````hcl +provider "databricks" { + host = "https://accounts.cloud.databricks.com" + basic_auth { + username = "username" + password = "password" + } +} +resource "databricks_mws_storage_configurations" "my_mws_storage_configurations" { + account_id = "my-mws-acct-id" + storage_configuration_name = "storage-configuration-name" + bucket_name = "my-root-s3-bucket" +} +```` +## Argument Reference + +The following arguments are supported: + +#### - `account_id`: +> **(Required)** Databricks multi-workspace master account ID. + +#### - `storage_configuration_name`: +> **(Required)** The human-readable name of the storage configuration. + +#### - `bucket_name`: +> **(Required)** Root S3 bucket information. + + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +#### - `id`: +> The id of the resource which follows the format accountId/storageConfigurationId. + +#### - `creation_time`: +> Time in epoch milliseconds when the storage configuration was created + +#### - `storage_configuration_id`: +> Databricks storage configuration ID. + + + +## Import + +{{% notice note %}} +Importing this resource is not currently supported. +{{% /notice %}} diff --git a/website/content/Resources/mws_workspaces.md b/website/content/Resources/mws_workspaces.md new file mode 100644 index 000000000..810d08609 --- /dev/null +++ b/website/content/Resources/mws_workspaces.md @@ -0,0 +1,137 @@ ++++ +title = "mws_workspaces" +date = 2020-04-20T23:34:03-04:00 +weight = 15 +chapter = false +pre = "" ++++ + + +## Resource: `mws_workspaces` + +This resource to configure the vpc for the multiple workspaces api if the BYOVPC option is chosen. 
+ +{{% notice warning %}} +It is important to understand that this will require you to configure your provider separately for the +multiple workspaces resources +{{% /notice %}} + +{{% notice note %}} +This will point to https://accounts.cloud.databricks.com for the HOST and it will use basic auth +as that is the only authentication method available for multiple workspaces api. +{{% /notice %}} + + +## Example Usage + +````hcl +provider "databricks" { + host = "https://accounts.cloud.databricks.com" + basic_auth { + username = "username" + password = "password" + } +} +resource "databricks_mws_credentials" "my_mws_credentials" { + account_id = "my-mws-account-id" + credentials_name = "my-cusotom-credentials" + role_arn = "arn:aws:iam::9999999999999:role/my-custom-cross-account-role" +} +resource "databricks_mws_storage_configurations" "my_mws_storage_configurations" { + account_id = "my-mws-acct-id" + storage_configuration_name = "storage-configuration-name" + bucket_name = "my-root-s3-bucket" +} +resource "databricks_mws_networks" "my_network" { + account_id = "my-mws-acct-id" + network_name = "my-custom-network-config-name" + vpc_id = "my-aws-vpc-id" + subnet_ids = [ + "my-first-subnet", + "my-second-subnet", + ] + security_group_ids = [ + "my-security-group-1", + ] +} +resource "databricks_mws_workspaces" "my_mws_workspace" { + account_id = "my-mws-acct-id" + workspace_name = "my-workspace-name" + deployment_name = "my-deployment-urlname" + aws_region = "my-aws-region" + credentials_id = databricks_mws_credentials.my_mws_credentials.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.my_mws_storage_configurations.storage_configuration_id + network_id = databricks_mws_networks.my_network.network_id + verify_workspace_runnning = true +} +```` +## Argument Reference + +The following arguments are supported: + +#### - `account_id`: +> **(Required)** Databricks multi-workspace master account ID. 
+ +#### - `workspace_name`: +> **(Required)** The workspace's human-readable name. It is used as part of the workspace URL. + +#### - `deployment_name`: +> **(Required)** The name of the deployment you want. The URL prefix of the workspace. +>Append .cloud.databricks.com to get the full URL. + +#### - `aws_region`: +> **(Required)** The AWS region of the workspace's Data Plane. + +#### - `credentials_id`: +> **(Required)** ID of the workspace's credential configuration object + +#### - `storage_configuration_id`: +> **(Required)** The ID of the workspace's storage configuration object. + +#### - `verify_workspace_runnning`: +> **(Required)** Validates that the workspace is functioning post creation. Recommended to turn this on +>to verify post apply that the workspace is in a running condition + +#### - `network_id`: +> **(Optional)** The ID of the workspace's network configuration object. + +#### - `is_no_public_ip_enabled`: +> **(Optional)** Specifies whether secure cluster connectivity (sometimes called no public IP) is enabled on this workspace. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +#### - `id`: +> The id of the resource which follows the format accountId/workspaceId. + +#### - `workspace_status`: +> Enum: "NOT_PROVISIONED" "PROVISIONING" "RUNNING" "FAILED" "BANNED" "CANCELLING" +>The status of the workspace. For workspace creation, it is typically initially PROVISIONING. +>Continue to check the status until the status is RUNNING. +>For detailed instructions of creating a new workspace with this API including error handling see +>Create a new workspace with the Multi-workspace API. + +#### - `workspace_status_message`: +> Message describing the current workspace status. + +#### - `creation_time`: +> Time in epoch milliseconds when the workspace was created. + +#### - `workspace_id`: +> The Databricks workspace ID. + +#### - `workspace_url`: +> The URL for the workspace.
+ +#### - `network_error_messages`: +> Array of error messages about the network configuration. +> Contains the following objects: +> error_type: The AWS resource associated with this error: credentials, VPC, subnet, security group, or network ACL. +> error_message: Details of the error. + +## Import + +{{% notice note %}} +Importing this resource is not currently supported. +{{% /notice %}} From 0dd0bc94a30de273010d365ddf32e238d511ad76 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 17:47:37 -0400 Subject: [PATCH 06/10] renamed e2 to MWS in service --- client/service/mws_credentials.go | 2 +- client/service/mws_credentials_integration_test.go | 2 +- client/service/mws_customer_managed_keys.go | 2 +- client/service/mws_customer_managed_keys_integration_test.go | 2 +- client/service/mws_networks.go | 2 +- client/service/mws_networks_integration_test.go | 2 +- client/service/mws_storage_configurations_integration_test.go | 2 +- client/service/mws_workspaces_integration_test.go | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/client/service/mws_credentials.go b/client/service/mws_credentials.go index 3b77cf18e..ed6ffb7c6 100644 --- a/client/service/mws_credentials.go +++ b/client/service/mws_credentials.go @@ -12,7 +12,7 @@ type MWSCredentialsAPI struct { Client *DBApiClient } -// Create creates a set of E2 Credentials for the cross account role +// Create creates a set of MWS Credentials for the cross account role func (a MWSCredentialsAPI) Create(mwsAcctId, credentialsName string, roleArn string) (model.MWSCredentials, error) { var mwsCreds model.MWSCredentials diff --git a/client/service/mws_credentials_integration_test.go b/client/service/mws_credentials_integration_test.go index ab804521b..a0335e171 100644 --- a/client/service/mws_credentials_integration_test.go +++ b/client/service/mws_credentials_integration_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func TestE2Creds(t *testing.T) { +func TestMWSCreds(t 
*testing.T) { if testing.Short() { t.Skip("skipping integration test in short mode.") } diff --git a/client/service/mws_customer_managed_keys.go b/client/service/mws_customer_managed_keys.go index 86642bf19..4fd0ebd5d 100644 --- a/client/service/mws_customer_managed_keys.go +++ b/client/service/mws_customer_managed_keys.go @@ -13,7 +13,7 @@ type MWSCustomerManagedKeysAPI struct { Client *DBApiClient } -// Create creates a set of E2 CustomerManagedKeys for the BYOVPC +// Create creates a set of MWS CustomerManagedKeys for the BYOVPC func (a MWSCustomerManagedKeysAPI) Create(mwsAcctId, keyArn, keyAlias, keyRegion string) (model.MWSCustomerManagedKey, error) { var mwsCustomerManagedKey model.MWSCustomerManagedKey diff --git a/client/service/mws_customer_managed_keys_integration_test.go b/client/service/mws_customer_managed_keys_integration_test.go index d5e13b6c4..7278da1f5 100644 --- a/client/service/mws_customer_managed_keys_integration_test.go +++ b/client/service/mws_customer_managed_keys_integration_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func TestE2CustomerManagedKeys(t *testing.T) { +func TestMWSCustomerManagedKeys(t *testing.T) { if testing.Short() { t.Skip("skipping integration test in short mode.") } diff --git a/client/service/mws_networks.go b/client/service/mws_networks.go index cebbba3f2..7364774ab 100644 --- a/client/service/mws_networks.go +++ b/client/service/mws_networks.go @@ -12,7 +12,7 @@ type MWSNetworksAPI struct { Client *DBApiClient } -// Create creates a set of E2 Networks for the BYOVPC +// Create creates a set of MWS Networks for the BYOVPC func (a MWSNetworksAPI) Create(mwsAcctId, networkName string, VPCID string, subnetIds []string, securityGroupIds []string) (model.MWSNetwork, error) { var mwsNetwork model.MWSNetwork networksAPIPath := fmt.Sprintf("/accounts/%s/networks", mwsAcctId) diff --git a/client/service/mws_networks_integration_test.go b/client/service/mws_networks_integration_test.go index 7f474e39d..7beb6d19b 100644 --- 
a/client/service/mws_networks_integration_test.go +++ b/client/service/mws_networks_integration_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func TestE2Networks(t *testing.T) { +func TestMWSNetworks(t *testing.T) { if testing.Short() { t.Skip("skipping integration test in short mode.") } diff --git a/client/service/mws_storage_configurations_integration_test.go b/client/service/mws_storage_configurations_integration_test.go index 7e98ea826..db9bc1434 100644 --- a/client/service/mws_storage_configurations_integration_test.go +++ b/client/service/mws_storage_configurations_integration_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func TestE2StorageConfigurations(t *testing.T) { +func TestMWSStorageConfigurations(t *testing.T) { if testing.Short() { t.Skip("skipping integration test in short mode.") } diff --git a/client/service/mws_workspaces_integration_test.go b/client/service/mws_workspaces_integration_test.go index 68d643092..071097efd 100644 --- a/client/service/mws_workspaces_integration_test.go +++ b/client/service/mws_workspaces_integration_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func TestE2Workspace(t *testing.T) { +func TestMWSWorkspace(t *testing.T) { if testing.Short() { t.Skip("skipping integration test in short mode.") } From 98e93497423ea23ac8e6b945554675d408de466c Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 18:16:17 -0400 Subject: [PATCH 07/10] patch support for workspace to use customer managed key, mws_workspaces resource can now use customer managed key with a "" default, and updated the docs to reflect cmk --- client/service/mws_workspaces.go | 6 +++++- .../resource_databricks_mws_workspaces.go | 17 ++++++++++++++--- .../Resources/mws_storage_configurations.md | 4 ++-- website/content/Resources/mws_workspaces.md | 8 ++++++++ 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/client/service/mws_workspaces.go b/client/service/mws_workspaces.go index c0ac29824..a56fb6374 100644 --- 
a/client/service/mws_workspaces.go +++ b/client/service/mws_workspaces.go @@ -76,7 +76,7 @@ func (a MWSWorkspacesAPI) WaitForWorkspaceRunning(mwsAcctId string, workspaceID } // Patch will relaunch the mws workspace deployment TODO: may need to include customer managed key -func (a MWSWorkspacesAPI) Patch(mwsAcctId string, workspaceID int64, awsRegion, credentialsID, storageConfigurationID, networkID string, isNoPublicIpEnabled bool) error { +func (a MWSWorkspacesAPI) Patch(mwsAcctId string, workspaceID int64, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyID string, isNoPublicIpEnabled bool) error { workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces/%d", mwsAcctId, workspaceID) mwsWorkspacesRequest := model.MWSWorkspace{ @@ -90,6 +90,10 @@ func (a MWSWorkspacesAPI) Patch(mwsAcctId string, workspaceID int64, awsRegion, mwsWorkspacesRequest.NetworkID = networkID } + if !reflect.ValueOf(customerManagedKeyID).IsZero() { + mwsWorkspacesRequest.CustomerManagedKeyID = customerManagedKeyID + } + _, err := a.Client.performQuery(http.MethodPatch, workspacesAPIPath, "2.0", nil, mwsWorkspacesRequest, nil) return err } diff --git a/databricks/resource_databricks_mws_workspaces.go b/databricks/resource_databricks_mws_workspaces.go index bd51a9f24..dcadd056c 100644 --- a/databricks/resource_databricks_mws_workspaces.go +++ b/databricks/resource_databricks_mws_workspaces.go @@ -69,6 +69,11 @@ func resourceMWSWorkspaces() *schema.Resource { Type: schema.TypeBool, Required: true, }, + "customer_managed_key_id": { + Type: schema.TypeString, + Default: "", + Optional: true, + }, "network_id": { Type: schema.TypeString, Optional: true, @@ -127,14 +132,15 @@ func resourceMWSWorkspacesCreate(d *schema.ResourceData, m interface{}) error { credentialsID := d.Get("credentials_id").(string) storageConfigurationID := d.Get("storage_configuration_id").(string) networkID := d.Get("network_id").(string) + customerManagedKeyId := 
d.Get("customer_managed_key_id").(string) isNoPublicIpEnabled := d.Get("is_no_public_ip_enabled").(bool) var workspace model.MWSWorkspace var err error - workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, "", isNoPublicIpEnabled) + workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyId, isNoPublicIpEnabled) // Sometimes workspaces api is buggy if err != nil { time.Sleep(15 * time.Second) - workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, "", isNoPublicIpEnabled) + workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyId, isNoPublicIpEnabled) if err != nil { return err } @@ -214,6 +220,10 @@ func resourceMWSWorkspacesRead(d *schema.ResourceData, m interface{}) error { if err != nil { return err } + err = d.Set("customer_managed_key_id", workspace.CustomerManagedKeyID) + if err != nil { + return err + } err = d.Set("account_id", workspace.AccountID) if err != nil { return err @@ -272,9 +282,10 @@ func resourceMWSWorkspacePatch(d *schema.ResourceData, m interface{}) error { credentialsID := d.Get("credentials_id").(string) storageConfigurationID := d.Get("storage_configuration_id").(string) networkID := d.Get("network_id").(string) + customerManagedKeyId := d.Get("customer_managed_key_id").(string) isNoPublicIpEnabled := d.Get("is_no_public_ip_enabled").(bool) - err = client.MWSWorkspaces().Patch(packagedMwsId.MwsAcctId, idInt64, awsRegion, credentialsID, storageConfigurationID, networkID, isNoPublicIpEnabled) + err = client.MWSWorkspaces().Patch(packagedMwsId.MwsAcctId, idInt64, awsRegion, credentialsID, storageConfigurationID, networkID, 
customerManagedKeyId, isNoPublicIpEnabled) if err != nil { return err } diff --git a/website/content/Resources/mws_storage_configurations.md b/website/content/Resources/mws_storage_configurations.md index 2f9249016..4c13c03c4 100644 --- a/website/content/Resources/mws_storage_configurations.md +++ b/website/content/Resources/mws_storage_configurations.md @@ -1,5 +1,5 @@ +++ -title = "multiworkspace_storage_configurations" +title = "mws_storage_configurations" date = 2020-04-20T23:34:03-04:00 weight = 15 chapter = false @@ -7,7 +7,7 @@ pre = "" +++ -## Resource: `multiworkspace_storage_configurations` +## Resource: `mws_storage_configurations` This resource to configure the root bucket for the multiple workspaces api. diff --git a/website/content/Resources/mws_workspaces.md b/website/content/Resources/mws_workspaces.md index 810d08609..1b0c7679e 100644 --- a/website/content/Resources/mws_workspaces.md +++ b/website/content/Resources/mws_workspaces.md @@ -11,6 +11,11 @@ pre = "" This resource to configure the vpc for the multiple workspaces api if the BYOVPC option is chosen. +{{% notice warning %}} +This provider does not yet support the customer_managed_key resource yet so you will need to manually create that +and provide the cmk object guid into the workspace api. You can see it on the argument reference below. +{{% /notice %}} + {{% notice warning %}} It is important to understand that this will require you to configure your provider separately for the multiple workspaces resources @@ -95,6 +100,9 @@ The following arguments are supported: #### - `network_id`: > **(Optional)** The ID of the workspace's network configuration object. +#### - `customer_managed_key_id`: +> **(Optional)** The ID of the workspace's notebook encryption key configuration object. + #### - `is_no_public_ip_enabled`: > **(Optional)** Specifies whether secure cluster connectivity (sometimes called no public IP) is enabled on this workspace. 
From c206821d1f2236832cb5e3395224e0d199e864a5 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 18:27:10 -0400 Subject: [PATCH 08/10] fixed useragent and moved to begining of config object creation --- databricks/provider.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/databricks/provider.go b/databricks/provider.go index ee882327d..ab195d3d2 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -231,6 +231,9 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac // Call setup to configure retryable httpclient config.Setup() + //version information from go-releaser using -ldflags to tell the golang linker to send semver info + config.UserAgent = fmt.Sprintf("databricks-tf-provider-%s", providerVersion) + if _, ok := d.GetOk("azure_auth"); !ok { if host, ok := d.GetOk("host"); ok { config.Host = host.(string) @@ -255,9 +258,7 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac return providerConfigureAzureClient(d, providerVersion, &config) } - //TODO: Bake the version of the provider using -ldflags to tell the golang linker to send - //version information from go-releaser - config.UserAgent = fmt.Sprintf("databricks-tf-provider-%s", providerVersion) + var dbClient service.DBApiClient dbClient.SetConfig(&config) return &dbClient, nil From 9b3ea1b995a341c9d870ac3425ba779d712a4c24 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 18:30:32 -0400 Subject: [PATCH 09/10] removed linebreak from fmt --- databricks/provider.go | 1 - 1 file changed, 1 deletion(-) diff --git a/databricks/provider.go b/databricks/provider.go index ab195d3d2..dfb15a504 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -258,7 +258,6 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac return providerConfigureAzureClient(d, providerVersion, &config) } - var dbClient service.DBApiClient 
dbClient.SetConfig(&config) return &dbClient, nil From 1c2762916df68a371bc1a49012e3560513bab727 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Tue, 9 Jun 2020 19:00:03 -0400 Subject: [PATCH 10/10] small resource naming doc fix --- website/content/Resources/mws_credentials.md | 2 +- website/content/Resources/mws_networks.md | 2 +- website/content/Resources/mws_storage_configurations.md | 2 +- website/content/Resources/mws_workspaces.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/content/Resources/mws_credentials.md b/website/content/Resources/mws_credentials.md index b87912ca9..14e0f62f6 100644 --- a/website/content/Resources/mws_credentials.md +++ b/website/content/Resources/mws_credentials.md @@ -7,7 +7,7 @@ pre = "" +++ -## Resource: `mws_credentials` +## Resource: `databricks_mws_credentials` This resource to configure the credentials for the multiple workspaces api. diff --git a/website/content/Resources/mws_networks.md b/website/content/Resources/mws_networks.md index dbacb20c4..36ac9329c 100644 --- a/website/content/Resources/mws_networks.md +++ b/website/content/Resources/mws_networks.md @@ -7,7 +7,7 @@ pre = "" +++ -## Resource: `mws_networks` +## Resource: `databricks_mws_networks` This resource to configure the vpc for the multiple workspaces api if the BYOVPC option is chosen. diff --git a/website/content/Resources/mws_storage_configurations.md b/website/content/Resources/mws_storage_configurations.md index 4c13c03c4..ae1131acf 100644 --- a/website/content/Resources/mws_storage_configurations.md +++ b/website/content/Resources/mws_storage_configurations.md @@ -7,7 +7,7 @@ pre = "" +++ -## Resource: `mws_storage_configurations` +## Resource: `databricks_mws_storage_configurations` This resource to configure the root bucket for the multiple workspaces api. 
diff --git a/website/content/Resources/mws_workspaces.md b/website/content/Resources/mws_workspaces.md index 1b0c7679e..6c67b6b64 100644 --- a/website/content/Resources/mws_workspaces.md +++ b/website/content/Resources/mws_workspaces.md @@ -7,7 +7,7 @@ pre = "" +++ -## Resource: `mws_workspaces` +## Resource: `databricks_mws_workspaces` This resource to configure the vpc for the multiple workspaces api if the BYOVPC option is chosen.