diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index 9bcc92744c..f4cef93c54 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -47,7 +47,6 @@ jobs: generic: - 'mongodbatlas/data_source_mongodbatlas_backup_compliance_policy*.go' - 'mongodbatlas/resource_mongodbatlas_backup_compliance_policy*.go' - - 'mongodbatlas/data_source_mongodbatlas_data_lakes_test*.go' - 'mongodbatlas/resource_mongodbatlas_x509_authentication_database_user*.go' - 'mongodbatlas/resource_mongodbatlas_auditing*.go' backup_online_archive: diff --git a/examples/atlas-dataLake-roles/aws-roles.tf b/examples/atlas-dataLake-roles/aws-roles.tf deleted file mode 100644 index 3d28836126..0000000000 --- a/examples/atlas-dataLake-roles/aws-roles.tf +++ /dev/null @@ -1,42 +0,0 @@ -resource "aws_iam_role_policy" "test_policy" { - name = "test_policy" - role = aws_iam_role.test_role.id - - policy = <<-EOF - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "*", - "Resource": "*" - } - ] - } - EOF -} - -resource "aws_iam_role" "test_role" { - name = "test_role" - - assume_role_policy = < 0 { - dataLakesMap = make([]map[string]interface{}, len(dataLakes)) - - for i := range dataLakes { - dataLakesMap[i] = map[string]interface{}{ - "project_id": dataLakes[i].GroupID, - "name": dataLakes[i].Name, - "aws": flattenAWSBlock(&dataLakes[i].CloudProviderConfig), - "data_process_region": flattenDataLakeProcessRegion(&dataLakes[i].DataProcessRegion), - "hostnames": dataLakes[i].Hostnames, - "state": dataLakes[i].State, - "storage_databases": flattenDataLakeStorageDatabases(dataLakes[i].Storage.Databases), - "storage_stores": flattenDataLakeStorageStores(dataLakes[i].Storage.Stores), - } - } - } - - return dataLakesMap -} diff --git a/mongodbatlas/data_source_mongodbatlas_data_lakes_test.go b/mongodbatlas/data_source_mongodbatlas_data_lakes_test.go deleted file mode 100644 index 4618cb15f9..0000000000 --- a/mongodbatlas/data_source_mongodbatlas_data_lakes_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package mongodbatlas - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" -) - -func TestAccGenericBackupDSDataLakes_basic(t *testing.T) { - resourceName := "data.mongodbatlas_data_lakes.test" - orgID := os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName := acctest.RandomWithPrefix("test-acc") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckBasic(t) }, - ProtoV6ProviderFactories: testAccProviderV6Factories, - Steps: []resource.TestStep{ - { - Config: testAccMongoDBAtlasDataLakesDataSourceConfig(orgID, projectName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "results.#"), - ), - }, - }, - }) -} - -func testAccMongoDBAtlasDataLakesDataSourceConfig(orgID, projectName string) string { - return fmt.Sprintf(` - resource "mongodbatlas_project" "backup_project" { - name = %[2]q - org_id = %[1]q - } - data "mongodbatlas_data_lakes" "test" { - project_id = mongodbatlas_project.backup_project.id - } - `, orgID, projectName) -} diff --git a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoint_service_adl.go b/mongodbatlas/data_source_mongodbatlas_privatelink_endpoint_service_adl.go deleted file mode 100644 index d8e88fc3f4..0000000000 --- 
a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoint_service_adl.go +++ /dev/null @@ -1,81 +0,0 @@ -package mongodbatlas - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func dataSourceMongoDBAtlasPrivateLinkEndpointServiceADL() *schema.Resource { - return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasPrivateLinkEndpointServiceADLRead, - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - }, - "endpoint_id": { - Type: schema.TypeString, - Required: true, - }, - "provider_name": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "comment": { - Type: schema.TypeString, - Computed: true, - }, - }, - DeprecationMessage: fmt.Sprintf(DeprecationMessage, "v1.12.0", "mongodbatlas_privatelink_endpoint_service_data_federation_online_archive"), - } -} - -func dataSourceMongoDBAtlasPrivateLinkEndpointServiceADLRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Get client connection. - conn := meta.(*MongoDBClient).Atlas - projectID := d.Get("project_id").(string) - endpointID := d.Get("endpoint_id").(string) - - privateLinkResponse, _, err := conn.DataLakes.GetPrivateLinkEndpoint(ctx, projectID, endpointID) - if err != nil { - // case 404 - // deleted in the backend case - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - - return diag.Errorf("error getting ADL PrivateLink Endpoint Information: %s", err) - } - - if err := d.Set("endpoint_id", privateLinkResponse.EndpointID); err != nil { - return diag.Errorf("error setting `endpoint_id` for endpoint_id (%s): %s", d.Id(), err) - } - - if err := d.Set("type", privateLinkResponse.Type); err != nil { - return diag.Errorf("error setting `type` for endpoint_id (%s): %s", d.Id(), err) - } - - if err := d.Set("comment", privateLinkResponse.Comment); err != nil { - return diag.Errorf("error setting `comment` for endpoint_id (%s): %s", d.Id(), err) - } - - if err := d.Set("provider_name", privateLinkResponse.Provider); err != nil { - return diag.Errorf("error setting `provider_name` for endpoint_id (%s): %s", d.Id(), err) - } - - d.SetId(encodeStateID(map[string]string{ - "project_id": projectID, - "endpoint_id": privateLinkResponse.EndpointID, - })) - - return nil -} diff --git a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoint_service_adl_test.go b/mongodbatlas/data_source_mongodbatlas_privatelink_endpoint_service_adl_test.go deleted file mode 100644 index 03b028e752..0000000000 --- a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoint_service_adl_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package mongodbatlas - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" -) - -func TestAccNetworkDSPrivateLinkEndpointServiceADL_basic(t *testing.T) { - datasourceName := "data.mongodbatlas_privatelink_endpoint_service_adl.test" - projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") - endpointID := "vpce-jjg5e24qp93513h03" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProtoV6ProviderFactories: testAccProviderV6Factories, - Steps: []resource.TestStep{ - { - Config: testAccMongoDBAtlasPrivateLinkEndpointADLDataSourceConfig(projectID, endpointID), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckMongoDBAtlasPrivateLinkEndpointServiceADLExists(datasourceName), - resource.TestCheckResourceAttr(datasourceName, "endpoint_id", endpointID), - resource.TestCheckResourceAttr(datasourceName, "type", "DATA_LAKE"), - resource.TestCheckResourceAttr(datasourceName, "provider_name", "AWS"), - resource.TestCheckResourceAttr(datasourceName, "comment", "private link adl comment"), - ), - }, - }, - }) -} - -func testAccMongoDBAtlasPrivateLinkEndpointADLDataSourceConfig(projectID, endpointID string) string { - return fmt.Sprintf(` - resource "mongodbatlas_privatelink_endpoint_service_adl" "test" { - project_id = "%[1]s" - endpoint_id = "%[2]s" - comment = "private link adl comment" - type = "DATA_LAKE" - provider_name = "AWS" - } - - data "mongodbatlas_privatelink_endpoint_service_adl" "test" { - project_id = mongodbatlas_privatelink_endpoint_service_adl.test.project_id - endpoint_id = mongodbatlas_privatelink_endpoint_service_adl.test.endpoint_id - } - `, projectID, endpointID) -} diff --git a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoints_service_adl.go b/mongodbatlas/data_source_mongodbatlas_privatelink_endpoints_service_adl.go deleted file mode 100644 index d97c513dcf..0000000000 --- a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoints_service_adl.go +++ /dev/null @@ -1,130 +0,0 @@ -package mongodbatlas - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - matlas "go.mongodb.org/atlas/mongodbatlas" -) - -func dataSourceMongoDBAtlasPrivateLinkEndpointsServiceADL() *schema.Resource { - return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasPrivateLinkEndpointsServiceADLRead, - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - }, - "links": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "href": { - Type: schema.TypeString, - Computed: true, - }, - "rel": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "results": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "endpoint_id": { - Type: schema.TypeString, - Computed: true, - }, - "provider_name": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "comment": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "total_count": { - Type: schema.TypeInt, - Computed: true, - }, - }, - DeprecationMessage: fmt.Sprintf(DeprecationMessage, "v1.12.0", "mongodbatlas_privatelink_endpoint_service_data_federation_online_archives"), - } -} - -func dataSourceMongoDBAtlasPrivateLinkEndpointsServiceADLRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Get client connection. 
- conn := meta.(*MongoDBClient).Atlas - projectID := d.Get("project_id").(string) - - privateLinkEndpoints, _, err := conn.DataLakes.ListPrivateLinkEndpoint(ctx, projectID) - if err != nil { - return diag.Errorf("error getting ADL PrivateLink Endpoints Information: %s", err) - } - - if err := d.Set("links", flattenADLPrivateEndpointLinks(privateLinkEndpoints.Links)); err != nil { - return diag.Errorf("error setting `results`: %s", err) - } - - if err := d.Set("results", flattenADLPrivateLinkEndpoints(privateLinkEndpoints.Results)); err != nil { - return diag.Errorf("error setting `results`: %s", err) - } - - if err := d.Set("total_count", privateLinkEndpoints.TotalCount); err != nil { - return diag.Errorf("error setting `total_count`: %s", err) - } - - d.SetId(id.UniqueId()) - - return nil -} - -func flattenADLPrivateEndpointLinks(links []*matlas.Link) []map[string]interface{} { - linksList := make([]map[string]interface{}, 0) - - for _, link := range links { - mLink := map[string]interface{}{ - "href": link.Href, - "rel": link.Rel, - } - linksList = append(linksList, mLink) - } - - return linksList -} - -func flattenADLPrivateLinkEndpoints(privateLinks []*matlas.PrivateLinkEndpointDataLake) []map[string]interface{} { - var results []map[string]interface{} - - if len(privateLinks) == 0 { - return results - } - - results = make([]map[string]interface{}, len(privateLinks)) - - for k, privateLink := range privateLinks { - results[k] = map[string]interface{}{ - "endpoint_id": privateLink.EndpointID, - "type": privateLink.Type, - "provider_name": privateLink.Provider, - "comment": privateLink.Comment, - } - } - - return results -} diff --git a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoints_service_adl_test.go b/mongodbatlas/data_source_mongodbatlas_privatelink_endpoints_service_adl_test.go deleted file mode 100644 index bf4fb5cb9d..0000000000 --- a/mongodbatlas/data_source_mongodbatlas_privatelink_endpoints_service_adl_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package mongodbatlas - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" -) - -func TestAccNetworkDSPrivateLinkEndpointsServiceADL_basic(t *testing.T) { - datasourceName := "data.mongodbatlas_privatelink_endpoints_service_adl.test" - projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") - endpointID := "vpce-jjg5e24qp93513h03" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProtoV6ProviderFactories: testAccProviderV6Factories, - Steps: []resource.TestStep{ - { - Config: testAccMongoDBAtlasPrivateLinkEndpointsADLDataSourceConfig(projectID, endpointID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(datasourceName, "project_id"), - resource.TestCheckResourceAttrSet(datasourceName, "links.#"), - resource.TestCheckResourceAttrSet(datasourceName, "results.#"), - resource.TestCheckResourceAttrSet(datasourceName, "results.0.endpoint_id"), - resource.TestCheckResourceAttrSet(datasourceName, "results.0.type"), - resource.TestCheckResourceAttrSet(datasourceName, "results.0.provider_name"), - resource.TestCheckResourceAttrSet(datasourceName, "results.0.comment"), - resource.TestCheckResourceAttr(datasourceName, "total_count", "1"), - ), - }, - }, - }) -} - -func testAccMongoDBAtlasPrivateLinkEndpointsADLDataSourceConfig(projectID, endpointID string) string { - return fmt.Sprintf(` - resource "mongodbatlas_privatelink_endpoint_service_adl" "test" { - project_id = "%[1]s" - endpoint_id = "%[2]s" - comment = "private link adl 
comment" - type = "DATA_LAKE" - provider_name = "AWS" - } - - data "mongodbatlas_privatelink_endpoints_service_adl" "test" { - project_id = mongodbatlas_privatelink_endpoint_service_adl.test.project_id - } - `, projectID, endpointID) -} diff --git a/mongodbatlas/provider.go b/mongodbatlas/provider.go index e078eec3dd..b51072bd57 100644 --- a/mongodbatlas/provider.go +++ b/mongodbatlas/provider.go @@ -141,8 +141,6 @@ func getDataSourcesMap() map[string]*schema.Resource { "mongodbatlas_privatelink_endpoint_service": dataSourceMongoDBAtlasPrivateEndpointServiceLink(), "mongodbatlas_privatelink_endpoint_service_serverless": dataSourceMongoDBAtlasPrivateLinkEndpointServerless(), "mongodbatlas_privatelink_endpoints_service_serverless": dataSourceMongoDBAtlasPrivateLinkEndpointsServiceServerless(), - "mongodbatlas_privatelink_endpoint_service_adl": dataSourceMongoDBAtlasPrivateLinkEndpointServiceADL(), - "mongodbatlas_privatelink_endpoints_service_adl": dataSourceMongoDBAtlasPrivateLinkEndpointsServiceADL(), "mongodbatlas_cloud_backup_schedule": dataSourceMongoDBAtlasCloudBackupSchedule(), "mongodbatlas_third_party_integrations": dataSourceMongoDBAtlasThirdPartyIntegrations(), "mongodbatlas_third_party_integration": dataSourceMongoDBAtlasThirdPartyIntegration(), @@ -155,8 +153,6 @@ func getDataSourcesMap() map[string]*schema.Resource { "mongodbatlas_ldap_verify": dataSourceMongoDBAtlasLDAPVerify(), "mongodbatlas_search_index": dataSourceMongoDBAtlasSearchIndex(), "mongodbatlas_search_indexes": dataSourceMongoDBAtlasSearchIndexes(), - "mongodbatlas_data_lake": dataSourceMongoDBAtlasDataLake(), - "mongodbatlas_data_lakes": dataSourceMongoDBAtlasDataLakes(), "mongodbatlas_data_lake_pipeline_run": dataSourceMongoDBAtlasDataLakePipelineRun(), "mongodbatlas_data_lake_pipeline_runs": dataSourceMongoDBAtlasDataLakePipelineRuns(), "mongodbatlas_data_lake_pipeline": dataSourceMongoDBAtlasDataLakePipeline(), @@ -219,7 +215,6 @@ func getResourcesMap() map[string]*schema.Resource { "mongodbatlas_privatelink_endpoint": resourceMongoDBAtlasPrivateLinkEndpoint(), "mongodbatlas_privatelink_endpoint_serverless": resourceMongoDBAtlasPrivateLinkEndpointServerless(), "mongodbatlas_privatelink_endpoint_service": resourceMongoDBAtlasPrivateEndpointServiceLink(), - "mongodbatlas_privatelink_endpoint_service_adl": resourceMongoDBAtlasPrivateLinkEndpointServiceADL(), "mongodbatlas_privatelink_endpoint_service_serverless": resourceMongoDBAtlasPrivateLinkEndpointServiceServerless(), "mongodbatlas_third_party_integration": resourceMongoDBAtlasThirdPartyIntegration(), "mongodbatlas_cloud_provider_access": resourceMongoDBAtlasCloudProviderAccess(), @@ -230,7 +225,6 @@ func getResourcesMap() map[string]*schema.Resource { "mongodbatlas_cloud_provider_access_setup": resourceMongoDBAtlasCloudProviderAccessSetup(), "mongodbatlas_cloud_provider_access_authorization": resourceMongoDBAtlasCloudProviderAccessAuthorization(), "mongodbatlas_search_index": resourceMongoDBAtlasSearchIndex(), - "mongodbatlas_data_lake": resourceMongoDBAtlasDataLake(), "mongodbatlas_data_lake_pipeline": resourceMongoDBAtlasDataLakePipeline(), "mongodbatlas_event_trigger": resourceMongoDBAtlasEventTriggers(), "mongodbatlas_cloud_backup_schedule": resourceMongoDBAtlasCloudBackupSchedule(), diff --git a/mongodbatlas/resource_mongodbatlas_data_lake.go b/mongodbatlas/resource_mongodbatlas_data_lake.go deleted file mode 100644 index ce6ced87b4..0000000000 --- a/mongodbatlas/resource_mongodbatlas_data_lake.go +++ /dev/null @@ -1,561 +0,0 @@ -package mongodbatlas - 
-import ( - "context" - "errors" - "fmt" - "net/http" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/spf13/cast" - matlas "go.mongodb.org/atlas/mongodbatlas" -) - -const ( - errorDataLakeCreate = "error creating MongoDB Atlas DataLake: %s" - errorDataLakeRead = "error reading MongoDB Atlas DataLake (%s): %s" - errorDataLakeDelete = "error deleting MongoDB Atlas DataLake (%s): %s" - errorDataLakeUpdate = "error updating MongoDB Atlas DataLake (%s): %s" - errorDataLakeSetting = "error setting `%s` for MongoDB Atlas DataLake (%s): %s" -) - -func resourceMongoDBAtlasDataLake() *schema.Resource { - return &schema.Resource{ - CreateContext: resourceMongoDBAtlasDataLakeCreate, - ReadContext: resourceMongoDBAtlasDataLakeRead, - UpdateContext: resourceMongoDBAtlasDataLakeUpdate, - DeleteContext: resourceMongoDBAtlasDataLakeDelete, - Importer: &schema.ResourceImporter{ - StateContext: resourceMongoDBAtlasDataLakeImportState, - }, - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "aws": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role_id": { - Type: schema.TypeString, - Required: true, - }, - "test_s3_bucket": { - Type: schema.TypeString, - Required: true, - }, - "iam_assumed_role_arn": { - Type: schema.TypeString, - Computed: true, - }, - "iam_user_arn": { - Type: schema.TypeString, - Computed: true, - }, - "external_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "data_process_region": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_provider": { - Type: schema.TypeString, - Required: true, - }, - "region": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "hostnames": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "state": { - Type: schema.TypeString, - Computed: true, - }, - "storage_databases": schemaDataLakesDatabases(), - "storage_stores": schemaDataLakesStores(), - }, - DeprecationMessage: fmt.Sprintf(DeprecationMessage, "v1.12.0", "mongodbatlas_federated_database_instance"), - } -} - -func schemaDataLakesDatabases() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - }, - "collections": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - }, - "data_sources": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "store_name": { - Type: schema.TypeString, - Computed: true, - }, - "default_format": { - Type: schema.TypeString, - Computed: true, - }, - "path": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "views": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - }, - "source": { - Type: schema.TypeString, - Computed: true, - }, - "pipeline": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - 
"max_wildcard_collections": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - } -} - -func schemaDataLakesStores() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - }, - "provider": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "bucket": { - Type: schema.TypeString, - Computed: true, - }, - "prefix": { - Type: schema.TypeString, - Computed: true, - }, - "delimiter": { - Type: schema.TypeString, - Computed: true, - }, - "include_tags": { - Type: schema.TypeBool, - Computed: true, - }, - "additional_storage_classes": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - } -} - -func resourceMongoDBAtlasDataLakeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Get client connection. - conn := meta.(*MongoDBClient).Atlas - projectID := d.Get("project_id").(string) - name := d.Get("name").(string) - - cloudConfig := &matlas.CloudProviderConfig{ - AWSConfig: expandDataLakeAwsBlock(d), - } - - dataLakeReq := &matlas.DataLakeCreateRequest{ - CloudProviderConfig: cloudConfig, - Name: name, - } - - dataLake, _, err := conn.DataLakes.Create(ctx, projectID, dataLakeReq) - if err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeCreate, err)) - } - - dataLake.CloudProviderConfig.AWSConfig.TestS3Bucket = cloudConfig.AWSConfig.TestS3Bucket - - d.SetId(encodeStateID(map[string]string{ - "project_id": projectID, - "name": dataLake.Name, - })) - - return resourceMongoDBAtlasDataLakeRead(ctx, d, meta) -} - -func resourceMongoDBAtlasDataLakeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Get client connection. 
- conn := meta.(*MongoDBClient).Atlas - ids := decodeStateID(d.Id()) - projectID := ids["project_id"] - name := ids["name"] - - dataLake, resp, err := conn.DataLakes.Get(ctx, projectID, name) - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - return diag.FromErr(fmt.Errorf(errorDataLakeRead, name, err)) - } - - values := flattenAWSBlock(&dataLake.CloudProviderConfig) - if len(values) != 0 { - if !counterEmptyValues(values[0]) { - if value, ok := d.GetOk("aws"); ok { - v := value.([]interface{}) - if len(v) != 0 { - v1 := v[0].(map[string]interface{}) - values[0]["test_s3_bucket"] = cast.ToString(v1["test_s3_bucket"]) - } - } - - if err = d.Set("aws", values); err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeSetting, "aws", name, err)) - } - } - } - - if err := d.Set("data_process_region", flattenDataLakeProcessRegion(&dataLake.DataProcessRegion)); err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeSetting, "data_process_region", name, err)) - } - - if err := d.Set("hostnames", dataLake.Hostnames); err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeSetting, "hostnames", name, err)) - } - - if err := d.Set("state", dataLake.State); err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeSetting, "state", name, err)) - } - - if err := d.Set("storage_databases", flattenDataLakeStorageDatabases(dataLake.Storage.Databases)); err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeSetting, "storage_databases", name, err)) - } - - if err := d.Set("storage_stores", flattenDataLakeStorageStores(dataLake.Storage.Stores)); err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeSetting, "storage_stores", name, err)) - } - - d.SetId(encodeStateID(map[string]string{ - "project_id": projectID, - "name": name, - })) - - return nil -} - -func resourceMongoDBAtlasDataLakeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Get client connection. - conn := meta.(*MongoDBClient).Atlas - ids := decodeStateID(d.Id()) - projectID := ids["project_id"] - name := ids["name"] - - dataProcess := &matlas.DataProcessRegion{} - awsConfig := matlas.AwsCloudProviderConfig{} - - if d.HasChange("aws_role_id") { - awsConfig.RoleID = cast.ToString(d.Get("aws_role_id")) - } - - if d.HasChange("aws_test_s3_bucket") { - awsConfig.TestS3Bucket = cast.ToString(d.Get("aws_test_s3_bucket")) - } - - if d.HasChange("data_process_region") { - dataProcess = expandDataLakeDataProcessRegion(d) - } - - dataLakeReq := &matlas.DataLakeUpdateRequest{ - CloudProviderConfig: &matlas.CloudProviderConfig{AWSConfig: awsConfig}, - DataProcessRegion: dataProcess, - } - _, _, err := conn.DataLakes.Update(ctx, projectID, name, dataLakeReq) - if err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeUpdate, name, err)) - } - - return resourceMongoDBAtlasDataLakeRead(ctx, d, meta) -} - -func resourceMongoDBAtlasDataLakeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Get client connection. 
- conn := meta.(*MongoDBClient).Atlas - ids := decodeStateID(d.Id()) - projectID := ids["project_id"] - name := ids["name"] - - _, err := conn.DataLakes.Delete(ctx, projectID, name) - if err != nil { - return diag.FromErr(fmt.Errorf(errorDataLakeDelete, name, err)) - } - - return nil -} - -func resourceMongoDBAtlasDataLakeImportState(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - conn := meta.(*MongoDBClient).Atlas - - projectID, name, s3Bucket, err := splitDataLakeImportID(d.Id()) - if err != nil { - return nil, err - } - - u, _, err := conn.DataLakes.Get(ctx, projectID, name) - if err != nil { - return nil, fmt.Errorf("couldn't import data lake(%s) for project (%s), error: %s", name, projectID, err) - } - - if err := d.Set("project_id", u.GroupID); err != nil { - return nil, fmt.Errorf("error setting `project_id` for data lakes (%s): %s", d.Id(), err) - } - - if err := d.Set("name", u.Name); err != nil { - return nil, fmt.Errorf("error setting `name` for data lakes (%s): %s", d.Id(), err) - } - mapAws := make([]map[string]interface{}, 0) - - mapAws = append(mapAws, map[string]interface{}{ - "test_s3_bucket": s3Bucket, - }) - - if err := d.Set("aws", mapAws); err != nil { - return nil, fmt.Errorf("error setting `aws` for data lakes (%s): %s", d.Id(), err) - } - - d.SetId(encodeStateID(map[string]string{ - "project_id": projectID, - "name": u.Name, - })) - - return []*schema.ResourceData{d}, nil -} - -func splitDataLakeImportID(id string) (projectID, name, s3Bucket string, err error) { - var parts = strings.Split(id, "--") - - if len(parts) != 3 { - err = errors.New("import format error: to import a Data Lake, use the format {project_id}--{name}--{test_s3_bucket}") - return - } - - projectID = parts[0] - name = parts[1] - s3Bucket = parts[2] - - return -} - -func flattenAWSBlock(aws *matlas.CloudProviderConfig) []map[string]interface{} { - if aws == nil { - return nil - } - - database := make([]map[string]interface{}, 0) - - database = append(database, map[string]interface{}{ - "role_id": aws.AWSConfig.RoleID, - "iam_assumed_role_arn": aws.AWSConfig.IAMAssumedRoleARN, - "iam_user_arn": aws.AWSConfig.IAMUserARN, - "external_id": aws.AWSConfig.ExternalID, - }) - - return database -} - -func flattenDataLakeProcessRegion(processRegion *matlas.DataProcessRegion) []interface{} { - if processRegion != nil && (processRegion.Region != "" || processRegion.CloudProvider != "") { - return []interface{}{map[string]interface{}{ - "cloud_provider": processRegion.CloudProvider, - "region": processRegion.Region, - }} - } - - return []interface{}{} -} - -func flattenDataLakeStorageDatabases(databases []matlas.DataLakeDatabase) []map[string]interface{} { - database := make([]map[string]interface{}, 0) - - for _, db := range databases { - database = append(database, map[string]interface{}{ - "name": db.Name, - "collections": flattenDataLakeStorageDatabaseCollections(db.Collections), - "views": flattenDataLakeStorageDatabaseViews(db.Views), - "max_wildcard_collections": db.MaxWildcardCollections, - }) - } - - return database -} - -func flattenDataLakeStorageDatabaseCollections(collections []matlas.DataLakeCollection) []map[string]interface{} { - database := make([]map[string]interface{}, 0) - - for _, db := range collections { - database = append(database, map[string]interface{}{ - "name": db.Name, - "data_sources": flattenDataLakeStorageDatabaseCollectionsDataSources(db.DataSources), - }) - } - - return database -} - -func 
flattenDataLakeStorageDatabaseCollectionsDataSources(dataSources []matlas.DataLakeDataSource) []map[string]interface{} { - database := make([]map[string]interface{}, 0) - - for _, db := range dataSources { - database = append(database, map[string]interface{}{ - "store_name": db.StoreName, - "default_format": db.DefaultFormat, - "path": db.Path, - }) - } - - return database -} - -func flattenDataLakeStorageDatabaseViews(views []matlas.DataLakeDatabaseView) []map[string]interface{} { - view := make([]map[string]interface{}, 0) - - for _, db := range views { - view = append(view, map[string]interface{}{ - "name": db.Name, - "source": db.Source, - "pipeline": db.Pipeline, - }) - } - - return view -} - -func flattenDataLakeStorageStores(stores []matlas.DataLakeStore) []map[string]interface{} { - store := make([]map[string]interface{}, 0) - - for i := range stores { - store = append(store, map[string]interface{}{ - "name": stores[i].Name, - "provider": stores[i].Provider, - "region": stores[i].Region, - "bucket": stores[i].Bucket, - "prefix": stores[i].Prefix, - "delimiter": stores[i].Delimiter, - "include_tags": stores[i].IncludeTags, - "additional_storage_classes": stores[i].AdditionalStorageClasses, - }) - } - - return store -} - -func expandDataLakeAwsBlock(d *schema.ResourceData) matlas.AwsCloudProviderConfig { - aws := matlas.AwsCloudProviderConfig{} - if value, ok := d.GetOk("aws"); ok { - v := value.([]interface{}) - if len(v) != 0 { - v1 := v[0].(map[string]interface{}) - - aws.RoleID = cast.ToString(v1["role_id"]) - aws.TestS3Bucket = cast.ToString(v1["test_s3_bucket"]) - } - } - return aws -} - -func expandDataLakeDataProcessRegion(d *schema.ResourceData) *matlas.DataProcessRegion { - if value, ok := d.GetOk("data_process_region"); ok { - vL := value.([]interface{}) - - if len(vL) != 0 { - v := vL[0].(map[string]interface{}) - - return &matlas.DataProcessRegion{ - CloudProvider: cast.ToString(v["cloud_provider"]), - Region: cast.ToString(v["region"]), - } - } - } - return nil -} - -func counterEmptyValues(values map[string]interface{}) bool { - count := 0 - for i := range values { - if val, ok := values[i]; ok { - strval, okT := val.(string) - if okT && strval == "" || strval == "false" { - count++ - } - } - } - - return len(values) == count -} diff --git a/mongodbatlas/resource_mongodbatlas_data_lake_pipeline.go b/mongodbatlas/resource_mongodbatlas_data_lake_pipeline.go index 4399e83971..08b9189194 100644 --- a/mongodbatlas/resource_mongodbatlas_data_lake_pipeline.go +++ b/mongodbatlas/resource_mongodbatlas_data_lake_pipeline.go @@ -20,6 +20,7 @@ const ( errorDataLakePipelineDelete = "error deleting MongoDB Atlas DataLake Pipeline (%s): %s" errorDataLakePipelineUpdate = "error updating MongoDB Atlas DataLake Pipeline: %s" errorDataLakePipelineSetting = "error setting `%s` for MongoDB Atlas DataLake Pipeline (%s): %s" + errorDataLakeSetting = "error setting `%s` for MongoDB Atlas DataLake (%s): %s" ) func resourceMongoDBAtlasDataLakePipeline() *schema.Resource { diff --git a/mongodbatlas/resource_mongodbatlas_data_lake_test.go b/mongodbatlas/resource_mongodbatlas_data_lake_test.go deleted file mode 100644 index 3bd23f47c2..0000000000 --- a/mongodbatlas/resource_mongodbatlas_data_lake_test.go +++ /dev/null @@ -1,253 +0,0 @@ -package mongodbatlas - -import ( - "context" - "fmt" - "log" - "os" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - 
"github.com/hashicorp/terraform-plugin-testing/terraform" - matlas "go.mongodb.org/atlas/mongodbatlas" -) - -func TestAccBackupRSDataLake_basic(t *testing.T) { - SkipTestExtCred(t) - var ( - resourceName = "mongodbatlas_data_lake.test" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acctest.RandomWithPrefix("test-acc") - name = acctest.RandomWithPrefix("test-acc") - policyName = acctest.RandomWithPrefix("test-acc") - roleName = acctest.RandomWithPrefix("test-acc") - testS3Bucket = os.Getenv("AWS_S3_BUCKET") - testS3BucketUpdated = os.Getenv("AWS_S3_BUCKET_UPDATED") - dataLakeRegion = "VIRGINIA_USA" - ) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProtoV6ProviderFactories: testAccProviderV6Factories, - CheckDestroy: testAccCheckMongoDBAtlasDataLakeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccMongoDBAtlasDataLakeConfig(policyName, roleName, projectName, orgID, name, testS3Bucket, dataLakeRegion, false), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "name", name), - ), - }, - { - Config: testAccMongoDBAtlasDataLakeConfig(policyName, roleName, projectName, orgID, name, testS3BucketUpdated, dataLakeRegion, true), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "name", name), - ), - }, - }, - }) -} - -func TestAccBackupRSDataLake_importBasic(t *testing.T) { - SkipTestExtCred(t) - var ( - resourceName = "mongodbatlas_data_lake.test" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acctest.RandomWithPrefix("test-acc") - name = acctest.RandomWithPrefix("test-acc") - policyName = acctest.RandomWithPrefix("test-acc") - roleName = acctest.RandomWithPrefix("test-acc") - testS3Bucket = os.Getenv("AWS_S3_BUCKET") - ) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProtoV6ProviderFactories: testAccProviderV6Factories, - CheckDestroy: testAccCheckMongoDBAtlasDataLakeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccMongoDBAtlasDataLakeConfig(policyName, roleName, projectName, orgID, name, testS3Bucket, "", false), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "name", name), - ), - }, - { - ResourceName: resourceName, - ImportStateIdFunc: testAccCheckMongoDBAtlasDataLakeImportStateIDFunc(resourceName, testS3Bucket), - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckMongoDBAtlasDataLakeImportStateIDFunc(resourceName, s3Bucket string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("not found: %s", resourceName) - } - - ids := decodeStateID(rs.Primary.ID) - - return fmt.Sprintf("%s--%s--%s", ids["project_id"], ids["name"], s3Bucket), nil - } -} - -func testAccCheckMongoDBAtlasDataLakeExists(resourceName string, dataLake *matlas.DataLake) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProviderSdkV2.Meta().(*MongoDBClient).Atlas - - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("not found: %s", resourceName) - } - - if rs.Primary.Attributes["project_id"] == "" { - return fmt.Errorf("no ID is set") - } - - ids := decodeStateID(rs.Primary.ID) 
- - if dataLakeResp, _, err := conn.DataLakes.Get(context.Background(), ids["project_id"], ids["name"]); err == nil { - *dataLake = *dataLakeResp - return nil - } - - return fmt.Errorf("datalake (%s) does not exist", ids["project_id"]) - } -} - -func testAccCheckMongoDBAtlasDataLakeAttributes(dataLake *matlas.DataLake, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - log.Printf("[DEBUG] difference dataLake.Name: %s , username : %s", dataLake.Name, name) - if dataLake.Name != name { - return fmt.Errorf("bad datalake name: %s", dataLake.Name) - } - - return nil - } -} - -func testAccCheckMongoDBAtlasDataLakeDestroy(s *terraform.State) error { - conn := testAccProviderSdkV2.Meta().(*MongoDBClient).Atlas - - for _, rs := range s.RootModule().Resources { - if rs.Type != "mongodbatlas_data_lake" { - continue - } - - ids := decodeStateID(rs.Primary.ID) - // Try to find the database user - _, _, err := conn.DataLakes.Get(context.Background(), ids["project_id"], ids["name"]) - if err == nil { - return fmt.Errorf("datalake (%s) still exists", ids["project_id"]) - } - } - - return nil -} - -func testAccMongoDBAtlasDataLakeConfig(policyName, roleName, projectName, orgID, name, testS3Bucket, dataLakeRegion string, isUpdate bool) string { - stepDataLakeConfig := testAccMongoDBAtlasDataLakeConfigFirstStep(name, testS3Bucket) - if isUpdate { - stepDataLakeConfig = testAccMongoDBAtlasDataLakeConfigSecondStep(name, testS3Bucket, dataLakeRegion) - } - return fmt.Sprintf(` -resource "aws_iam_role_policy" "test_policy" { - name = %[1]q - role = aws_iam_role.test_role.id - - policy = <<-EOF - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "*", - "Resource": "*" - } - ] - } - EOF -} - -resource "aws_iam_role" "test_role" { - name = %[2]q - - assume_role_policy = < **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. - -## Example Usage - -```terraform -resource "mongodbatlas_project" "test" { - name = "NAME OF THE PROJECT" - org_id = "ORGANIZATION ID" -} -resource "mongodbatlas_cloud_provider_access" "test" { - project_id = mongodbatlas_project.test.id - provider_name = "AWS" - iam_assumed_role_arn = "AWS ROLE ID" -} - -resource "mongodbatlas_data_lake" "basic_ds" { - project_id = mongodbatlas_project.test.id - name = "DATA LAKE NAME" - aws{ - role_id = mongodbatlas_cloud_provider_access.test.role_id - test_s3_bucket = "TEST S3 BUCKET NAME" - } -} - -data "mongodbatlas_data_lake" "test" { - project_id = mongodbatlas_data_lake.test.project_id - name = mongodbatlas_data_lake.test.name -} -``` - -## Argument Reference - -* `name` - (Required) Name of the data lake. -* `project_id` - (Required) The unique ID for the project to create a data lake. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The Terraform's unique identifier used internally for state management. -* `aws` - AWS provider of the cloud service where Data Lake can access the S3 Bucket. - * `aws.0.role_id` - Unique identifier of the role that Data Lake can use to access the data stores. - * `aws.0.test_s3_bucket` - Name of the S3 data bucket that the provided role ID is authorized to access. - * `aws.0.role_id` - Unique identifier of the role that Data Lake can use to access the data stores. - * `aws.0.test_s3_bucket` - Name of the S3 data bucket that the provided role ID is authorized to access. 
- * `aws.0.iam_assumed_role_arn` - Amazon Resource Name (ARN) of the IAM Role that Data Lake assumes when accessing S3 Bucket data stores. - - For more information on S3 actions, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html). - -* `aws_iam_user_arn` - Amazon Resource Name (ARN) of the user that Data Lake assumes when accessing S3 Bucket data stores. -* `aws_external_id` - Unique identifier associated with the IAM Role that Data Lake assumes when accessing the data stores. - -* `data_process_region` - The cloud provider region to which Atlas Data Lake routes client connections for data processing. - * `data_process_region.0.cloud_provider` - Name of the cloud service provider. - * `data_process_region.0.region` -Name of the region to which Data Lake routes client connections for data processing. -* `hostnames` - The list of hostnames assigned to the Atlas Data Lake. Each string in the array is a hostname assigned to the Atlas Data Lake. -* `state` - Current state of the Atlas Data Lake: - * `ACTIVE` - The Data Lake is active and verified. You can query the data stores associated with the Atlas Data Lake. -* `storage_databases` - Configuration details for mapping each data store to queryable databases and collections. - * `storage_databases.#.name` - Name of the database to which Data Lake maps the data contained in the data store. - * `storage_databases.#.collections` - Array of objects where each object represents a collection and data sources that map to a [stores](https://docs.mongodb.com/datalake/reference/format/data-lake-configuration#mongodb-datalakeconf-datalakeconf.stores) data store. - * `storage_databases.#.collections.#.name` - Name of the collection. - * `storage_databases.#.collections.#.data_sources` - Array of objects where each object represents a stores data store to map with the collection. - * `storage_databases.#.collections.#.data_sources.#.store_name` - Name of a data store to map to the ``. - * `storage_databases.#.collections.#.data_sources.#.default_format` - Default format that Data Lake assumes if it encounters a file without an extension while searching the storeName. - * `storage_databases.#.collections.#.data_sources.#.path` - Controls how Atlas Data Lake searches for and parses files in the storeName before mapping them to the ``. - * `storage_databases.#.views` - Array of objects where each object represents an [aggregation pipeline](https://docs.mongodb.com/manual/core/aggregation-pipeline/#id1) on a collection. - * `storage_databases.#.views.#.name` - Name of the view. - * `storage_databases.#.views.#.source` - Name of the source collection for the view. - * `storage_databases.#.views.#.pipeline`- Aggregation pipeline stage(s) to apply to the source collection. -* `storage_stores` - Each object in the array represents a data store. Data Lake uses the storage.databases configuration details to map data in each data store to queryable databases and collections. - * `storage_stores.#.name` - Name of the data store. - * `storage_stores.#.provider` - Defines where the data is stored. - * `storage_stores.#.region` - Name of the AWS region in which the S3 bucket is hosted. - * `storage_stores.#.bucket` - Name of the AWS S3 bucket. - * `storage_stores.#.prefix` - Prefix Data Lake applies when searching for files in the S3 bucket . 
- * `storage_stores.#.delimiter` - The delimiter that separates `storage_databases.#.collections.#.data_sources.#.path` segments in the data store. - * `storage_stores.#.include_tags` - Determines whether or not to use S3 tags on the files in the given path as additional partition attributes. - -See [MongoDB Atlas API](https://docs.mongodb.com/datalake/reference/api/dataLakes-get-one-tenant) Documentation for more information. \ No newline at end of file diff --git a/website/docs/d/data_lakes.html.markdown b/website/docs/d/data_lakes.html.markdown deleted file mode 100644 index 844c34079b..0000000000 --- a/website/docs/d/data_lakes.html.markdown +++ /dev/null @@ -1,73 +0,0 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lakes" -sidebar_current: "docs-mongodbatlas-datasource-data-lakes" -description: |- - Describes a Data Lakes. ---- - -# Data Source: mongodbatlas_data_lakes - -`mongodbatlas_data_lakes` describe all Data Lakes. - - --> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. - -## Example Usage - -```terraform -data "mongodbatlas_data_lakes" "test" { - project_id = "PROJECT ID" -} -``` - -## Argument Reference - -* `project_id` - (Required) The unique ID for the project to get all data lakes. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - Autogenerated Unique ID for this data source. -* `results` - A list where each represents a Data lake. - - -### Data Lake - -* `aws_role_id` - Unique identifier of the role that Data Lake can use to access the data stores. -* `aws_test_s3_bucket` - Name of the S3 data bucket that the provided role ID is authorized to access. -* `data_process_region` - The cloud provider region to which Atlas Data Lake routes client connections for data processing. - * `data_process_region.0.cloud_provider` - Name of the cloud service provider. - * `data_process_region.0.region` -Name of the region to which Data Lake routes client connections for data processing. -* `aws_iam_assumed_role_arn` - Amazon Resource Name (ARN) of the IAM Role that Data Lake assumes when accessing S3 Bucket data stores. - - For more information on S3 actions, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html). - -* `aws_iam_user_arn` - Amazon Resource Name (ARN) of the user that Data Lake assumes when accessing S3 Bucket data stores. -* `aws_external_id` - Unique identifier associated with the IAM Role that Data Lake assumes when accessing the data stores. -* `hostnames` - The list of hostnames assigned to the Atlas Data Lake. Each string in the array is a hostname assigned to the Atlas Data Lake. -* `state` - Current state of the Atlas Data Lake: - * `ACTIVE` - The Data Lake is active and verified. You can query the data stores associated with the Atlas Data Lake. -* `storage_databases` - Configuration details for mapping each data store to queryable databases and collections. - * `storage_databases.#.name` - Name of the database to which Data Lake maps the data contained in the data store. - * `storage_databases.#.collections` - Array of objects where each object represents a collection and data sources that map to a [stores](https://docs.mongodb.com/datalake/reference/format/data-lake-configuration#mongodb-datalakeconf-datalakeconf.stores) data store. - * `storage_databases.#.collections.#.name` - Name of the collection. 
- * `storage_databases.#.collections.#.data_sources` - Array of objects where each object represents a stores data store to map with the collection. - * `storage_databases.#.collections.#.data_sources.#.store_name` - Name of a data store to map to the ``. - * `storage_databases.#.collections.#.data_sources.#.default_format` - Default format that Data Lake assumes if it encounters a file without an extension while searching the storeName. - * `storage_databases.#.collections.#.data_sources.#.path` - Controls how Atlas Data Lake searches for and parses files in the storeName before mapping them to the ``. - * `storage_databases.#.views` - Array of objects where each object represents an [aggregation pipeline](https://docs.mongodb.com/manual/core/aggregation-pipeline/#id1) on a collection. - * `storage_databases.#.views.#.name` - Name of the view. - * `storage_databases.#.views.#.source` - Name of the source collection for the view. - * `storage_databases.#.views.#.pipeline`- Aggregation pipeline stage(s) to apply to the source collection. -* `storage_stores` - Each object in the array represents a data store. Data Lake uses the storage.databases configuration details to map data in each data store to queryable databases and collections. - * `storage_stores.#.name` - Name of the data store. - * `storage_stores.#.provider` - Defines where the data is stored. - * `storage_stores.#.region` - Name of the AWS region in which the S3 bucket is hosted. - * `storage_stores.#.bucket` - Name of the AWS S3 bucket. - * `storage_stores.#.prefix` - Prefix Data Lake applies when searching for files in the S3 bucket . - * `storage_stores.#.delimiter` - The delimiter that separates `storage_databases.#.collections.#.data_sources.#.path` segments in the data store. - * `storage_stores.#.include_tags` - Determines whether or not to use S3 tags on the files in the given path as additional partition attributes. - -See [MongoDB Atlas API](https://docs.mongodb.com/datalake/reference/api/dataLakes-get-all-tenants) Documentation for more information. \ No newline at end of file diff --git a/website/docs/r/data_lake.html.markdown b/website/docs/r/data_lake.html.markdown deleted file mode 100644 index e68c898c45..0000000000 --- a/website/docs/r/data_lake.html.markdown +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lake" -sidebar_current: "docs-mongodbatlas-resource-data-lake" -description: |- - Provides a Data Lake resource. ---- - -# Resource: mongodbatlas_data_lake - -`mongodbatlas_data_lake` provides a Data Lake resource. - --> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. - -~> **IMPORTANT:** All arguments including the password will be stored in the raw state as plain-text. 
[Read more about sensitive data in state.](https://www.terraform.io/docs/state/sensitive-data.html) - -## Example Usages - - -```terraform -resource "mongodbatlas_project" "test" { - name = "NAME OF THE PROJECT" - org_id = "ORGANIZATION ID" -} -resource "mongodbatlas_cloud_provider_access" "test" { - project_id = mongodbatlas_project.test.id - provider_name = "AWS" - iam_assumed_role_arn = "AWS ROLE ID" -} - -resource "mongodbatlas_data_lake" "basic_ds" { - project_id = mongodbatlas_project.test.id - name = "DATA LAKE NAME" - aws{ - role_id = mongodbatlas_cloud_provider_access.test.role_id - test_s3_bucket = "TEST S3 BUCKET NAME" - } -} -``` - -## Argument Reference - -* `project_id` - (Required) The unique ID for the project to create a data lake. -* `name` - (Required) Name of the Atlas Data Lake. -* `aws` - (Required) AWS provider of the cloud service where Data Lake can access the S3 Bucket. - * `aws.0.role_id` - (Required) Unique identifier of the role that Data Lake can use to access the data stores. If necessary, use the Atlas [UI](https://docs.atlas.mongodb.com/security/manage-iam-roles/) or [API](https://docs.atlas.mongodb.com/reference/api/cloud-provider-access-get-roles/) to retrieve the role ID. You must also specify the `aws.0.test_s3_bucket`. - * `aws.0.test_s3_bucket` - (Required) Name of the S3 data bucket that the provided role ID is authorized to access. You must also specify the `aws.0.role_id`. -* `data_process_region` - (Optional) The cloud provider region to which Atlas Data Lake routes client connections for data processing. Set to `null` to direct Atlas Data Lake to route client connections to the region nearest to the client based on DNS resolution. - * `data_process_region.0.cloud_provider` - (Required) Name of the cloud service provider. Atlas Data Lake only supports AWS. - * `data_process_region.0.region` - (Required). Name of the region to which Data Lake routes client connections for data processing. Atlas Data Lake only supports the following regions: - * `SYDNEY_AUS` (ap-southeast-2) - * `FRANKFURT_DEU` (eu-central-1) - * `DUBLIN_IRL` (eu-west-1) - * `LONDON_GBR` (eu-west-2) - * `VIRGINIA_USA` (us-east-1) - * `OREGON_USA` (us-west-2) - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The Terraform's unique identifier used internally for state management. -* `aws.0.iam_assumed_role_arn` - Amazon Resource Name (ARN) of the IAM Role that Data Lake assumes when accessing S3 Bucket data stores. The IAM Role must support the following actions against each S3 bucket: - * `s3:GetObject` - * `s3:ListBucket` - * `s3:GetObjectVersion` - - For more information on S3 actions, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html). - -* `aws.0.iam_user_arn` - Amazon Resource Name (ARN) of the user that Data Lake assumes when accessing S3 Bucket data stores. -* `aws.0.external_id` - Unique identifier associated with the IAM Role that Data Lake assumes when accessing the data stores. -* `hostnames` - The list of hostnames assigned to the Atlas Data Lake. Each string in the array is a hostname assigned to the Atlas Data Lake. -* `state` - Current state of the Atlas Data Lake: - * `ACTIVE` - The Data Lake is active and verified. You can query the data stores associated with the Atlas Data Lake. -* `storage_databases` - Configuration details for mapping each data store to queryable databases and collections. 
For complete documentation on this object and its nested fields, see [databases](https://docs.mongodb.com/datalake/reference/format/data-lake-configuration#std-label-datalake-databases-reference). An empty object indicates that the Data Lake has no mapping configuration for any data store. - * `storage_databases.#.name` - Name of the database to which Data Lake maps the data contained in the data store. - * `storage_databases.#.collections` - Array of objects where each object represents a collection and data sources that map to a [stores](https://docs.mongodb.com/datalake/reference/format/data-lake-configuration#mongodb-datalakeconf-datalakeconf.stores) data store. - * `storage_databases.#.collections.#.name` - Name of the collection. - * `storage_databases.#.collections.#.data_sources` - Array of objects where each object represents a stores data store to map with the collection. - * `storage_databases.#.collections.#.data_sources.#.store_name` - Name of a data store to map to the ``. Must match the name of an object in the stores array. - * `storage_databases.#.collections.#.data_sources.#.default_format` - Default format that Data Lake assumes if it encounters a file without an extension while searching the storeName. - * `storage_databases.#.collections.#.data_sources.#.path` - Controls how Atlas Data Lake searches for and parses files in the storeName before mapping them to the ``. - * `storage_databases.#.views` - Array of objects where each object represents an [aggregation pipeline](https://docs.mongodb.com/manual/core/aggregation-pipeline/#id1) on a collection. To learn more about views, see [Views](https://docs.mongodb.com/manual/core/views/). - * `storage_databases.#.views.#.name` - Name of the view. - * `storage_databases.#.views.#.source` - Name of the source collection for the view. - * `storage_databases.#.views.#.pipeline`- Aggregation pipeline stage(s) to apply to the source collection. -* `storage_stores` - Each object in the array represents a data store. Data Lake uses the storage.databases configuration details to map data in each data store to queryable databases and collections. For complete documentation on this object and its nested fields, see [stores](https://docs.mongodb.com/datalake/reference/format/data-lake-configuration#std-label-datalake-stores-reference). An empty object indicates that the Data Lake has no configured data stores. - * `storage_stores.#.name` - Name of the data store. - * `storage_stores.#.provider` - Defines where the data is stored. - * `storage_stores.#.region` - Name of the AWS region in which the S3 bucket is hosted. - * `storage_stores.#.bucket` - Name of the AWS S3 bucket. - * `storage_stores.#.prefix` - Prefix Data Lake applies when searching for files in the S3 bucket . - * `storage_stores.#.delimiter` - The delimiter that separates `storage_databases.#.collections.#.data_sources.#.path` segments in the data store. - * `storage_stores.#.include_tags` - Determines whether or not to use S3 tags on the files in the given path as additional partition attributes. - -## Import - -Data Lake can be imported using project ID, name of the data lake and name of the AWS s3 bucket, in the format `project_id`--`name`--`aws_test_s3_bucket`, e.g. - -``` -$ terraform import mongodbatlas_data_lake.example 1112222b3bf99403840e8934--test-data-lake--s3-test -``` - -See [MongoDB Atlas API](https://docs.mongodb.com/datalake/reference/api/dataLakes-create-one-tenant) Documentation for more information. \ No newline at end of file