From 98dcb1b88c885ae8579d9afa2d8851a48b1d18fa Mon Sep 17 00:00:00 2001 From: Tim Curless Date: Fri, 24 Aug 2018 12:33:50 -0500 Subject: [PATCH 01/13] formatting --- azurerm/data_source_kubernetes_cluster.go | 40 +++++++++++----------- azurerm/resource_arm_kubernetes_cluster.go | 14 ++++---- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index bdd5daae79c31..266e53b602fd8 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -40,8 +40,8 @@ func dataSourceArmKubernetesCluster() *schema.Resource { "enable_rbac": { Type: schema.TypeBool, - Computed: true, - }, + Computed: true, + }, "node_resource_group": { Type: schema.TypeString, @@ -177,33 +177,33 @@ func dataSourceArmKubernetesCluster() *schema.Resource { }, "aad_profile": { - Type: schema.TypeList, + Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "server_app_id": { - Type: schema.TypeString, - Computed: true, - }, - - "server_app_secret": { + "server_app_id": { + Type: schema.TypeString, + Computed: true, + }, + + "server_app_secret": { Type: schema.TypeString, Computed: true, Sensitive: true, }, - - "client_app_id": { - Type: schema.TypeString, + + "client_app_id": { + Type: schema.TypeString, Computed: true, }, - - "tenant_id": { - Type: schema.TypeString, + + "tenant_id": { + Type: schema.TypeString, Computed: true, - }, - }, - }, - }, + }, + }, + }, + }, "network_profile": { Type: schema.TypeList, @@ -214,7 +214,7 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, - + "service_cidr": { Type: schema.TypeString, Computed: true, diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 353c07e1f17b6..7fedc88b9d1d4 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -91,8 +91,8 @@ func resourceArmKubernetesCluster() *schema.Resource { Optional: true, ForceNew: true, Default: true, - }, - + }, + "node_resource_group": { Type: schema.TypeString, Computed: true, @@ -292,12 +292,12 @@ func resourceArmKubernetesCluster() *schema.Resource { "tenant_id": { Type: schema.TypeString, Required: true, - }, - }, - }, - }, + }, + }, + }, + }, - "network_profile": { + "network_profile": { Type: schema.TypeList, Optional: true, Computed: true, From 3fb0d8f8980a554d153d9a93d139d60fd64ebce6 Mon Sep 17 00:00:00 2001 From: Junyi Yi Date: Thu, 30 Aug 2018 04:01:55 -0700 Subject: [PATCH 02/13] Allow azurerm_function_app to use upper case names in consumption plan (#1835) * Add repro test case for #1765 * Lower-case the content share name to prevent Bad Request * Add link to the API request as TODO item. 
--- azurerm/resource_arm_function_app.go | 4 +- azurerm/resource_arm_function_app_test.go | 66 +++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/azurerm/resource_arm_function_app.go b/azurerm/resource_arm_function_app.go index 3183d6e154796..9941e1ca8a3cd 100644 --- a/azurerm/resource_arm_function_app.go +++ b/azurerm/resource_arm_function_app.go @@ -510,6 +510,8 @@ func resourceArmFunctionAppDelete(d *schema.ResourceData, meta interface{}) erro } func getBasicFunctionAppAppSettings(d *schema.ResourceData, appServiceTier string) []web.NameValuePair { + // TODO: This is a workaround since there are no public Functions API + // You may track the API request here: https://github.com/Azure/azure-rest-api-specs/issues/3750 dashboardPropName := "AzureWebJobsDashboard" storagePropName := "AzureWebJobsStorage" functionVersionPropName := "FUNCTIONS_EXTENSION_VERSION" @@ -518,7 +520,7 @@ func getBasicFunctionAppAppSettings(d *schema.ResourceData, appServiceTier strin storageConnection := d.Get("storage_connection_string").(string) functionVersion := d.Get("version").(string) - contentShare := d.Get("name").(string) + "-content" + contentShare := strings.ToLower(d.Get("name").(string)) + "-content" basicSettings := []web.NameValuePair{ {Name: &dashboardPropName, Value: &storageConnection}, diff --git a/azurerm/resource_arm_function_app_test.go b/azurerm/resource_arm_function_app_test.go index b7dd2d2197fe0..09b26c55c65de 100644 --- a/azurerm/resource_arm_function_app_test.go +++ b/azurerm/resource_arm_function_app_test.go @@ -338,6 +338,35 @@ func TestAccAzureRMFunctionApp_consumptionPlan(t *testing.T) { }) } +func TestAccAzureRMFunctionApp_consumptionPlanUppercaseName(t *testing.T) { + resourceName := "azurerm_function_app.test" + ri := acctest.RandInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + config := testAccAzureRMFunctionApp_consumptionPlanUppercaseName(ri, rs, location) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMFunctionAppDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMFunctionAppExists(resourceName), + testCheckAzureRMFunctionAppHasContentShare(resourceName), + resource.TestCheckResourceAttr(resourceName, "site_config.0.use_32_bit_worker_process", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMFunctionApp_createIdentity(t *testing.T) { resourceName := "azurerm_function_app.test" ri := acctest.RandInt() @@ -982,6 +1011,43 @@ resource "azurerm_function_app" "test" { `, rInt, location, rString, rInt, rInt) } +func testAccAzureRMFunctionApp_consumptionPlanUppercaseName(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_app_service_plan" "test" { + name = "acctestASP-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + kind = "FunctionApp" + + sku { + tier = "Dynamic" + size = "Y1" + } +} + +resource 
"azurerm_function_app" "test" { + name = "acctest-%d-FuncWithUppercase" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + app_service_plan_id = "${azurerm_app_service_plan.test.id}" + storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" +} +`, rInt, location, rString, rInt, rInt) +} + func testAccAzureRMFunctionApp_basicIdentity(rInt int, storage string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { From 7900b4043b15a31ea925c5f6ddf30e9ade4a6ead Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 12:02:25 +0100 Subject: [PATCH 03/13] Updating to include #1835 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8be325f6e9fc9..de04dc62d415e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ BUG FIXES: * `azurerm_eventhub_authorization_rule` - updating the validation error [GH-1795] * `azurerm_eventhub_consumer_group` - updating the validation to support periods, hyphens and underscores [GH-1795] * `azurerm_eventhub_namespace` - updating the validation error [GH-1795] +* `azurerm_function_app` - support for names in upper-case [GH-1835] * `azurerm_kubernetes_cluster` - removing validation for the `pod_cidr` field when `network_plugin` is set to `azure` [GH-1798] * `azurerm_virtual_machine` - setting the `image_uri` property within the `storage_os_disk` block [GH-1799] From 7b948641ba5e269286a70cbf6be9e82779afe85d Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 13:38:44 +0100 Subject: [PATCH 04/13] Storage: Import Support (#1816) * Storage Blob: updating the ID to allow importing * Storage Blob: Import support * Storage Container: updating the ID * Storage Container: support for import * Including `blob` in the uri * Storage Queue: fixing the id / supporting import * Storage Table: fixing the ID / import support ``` $ acctests azurerm TestAccAzureRMStorageTable_basic === RUN TestAccAzureRMStorageTable_basic --- PASS: TestAccAzureRMStorageTable_basic (89.22s) PASS ok github.com/terraform-providers/terraform-provider-azurerm/azurerm 89.596s ``` * Removing an unused comment * Setting the fields for the Blob/Container resources - Ensuring all the fields are set for the Blob and Container resources - Removing the separate Exists functions --- azurerm/resource_arm_storage_blob.go | 235 +++++++++++------- .../resource_arm_storage_blob_migration.go | 40 +++ ...esource_arm_storage_blob_migration_test.go | 67 +++++ azurerm/resource_arm_storage_blob_test.go | 61 +++-- azurerm/resource_arm_storage_container.go | 192 ++++++++------ ...esource_arm_storage_container_migration.go | 39 +++ ...ce_arm_storage_container_migration_test.go | 66 +++++ .../resource_arm_storage_container_test.go | 18 +- azurerm/resource_arm_storage_queue.go | 120 ++++++--- .../resource_arm_storage_queue_migration.go | 40 +++ ...source_arm_storage_queue_migration_test.go | 66 +++++ azurerm/resource_arm_storage_queue_test.go | 8 +- azurerm/resource_arm_storage_table.go | 111 +++++++-- .../resource_arm_storage_table_migration.go | 40 +++ ...source_arm_storage_table_migration_test.go | 66 +++++ azurerm/resource_arm_storage_table_test.go | 8 +- website/docs/r/storage_blob.html.markdown | 10 +- .../docs/r/storage_container.html.markdown | 10 +- website/docs/r/storage_queue.html.markdown | 10 +- website/docs/r/storage_table.html.markdown | 10 +- 20 files changed, 958 insertions(+), 259 deletions(-) create 
mode 100644 azurerm/resource_arm_storage_blob_migration.go create mode 100644 azurerm/resource_arm_storage_blob_migration_test.go create mode 100644 azurerm/resource_arm_storage_container_migration.go create mode 100644 azurerm/resource_arm_storage_container_migration_test.go create mode 100644 azurerm/resource_arm_storage_queue_migration.go create mode 100644 azurerm/resource_arm_storage_queue_migration_test.go create mode 100644 azurerm/resource_arm_storage_table_migration.go create mode 100644 azurerm/resource_arm_storage_table_migration_test.go diff --git a/azurerm/resource_arm_storage_blob.go b/azurerm/resource_arm_storage_blob.go index ca40247cae1f7..22af74b011c36 100644 --- a/azurerm/resource_arm_storage_blob.go +++ b/azurerm/resource_arm_storage_blob.go @@ -7,22 +7,28 @@ import ( "fmt" "io" "log" + "net/url" "os" "runtime" "strings" "sync" "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/terraform/helper/schema" ) func resourceArmStorageBlob() *schema.Resource { return &schema.Resource{ - Create: resourceArmStorageBlobCreate, - Read: resourceArmStorageBlobRead, - Update: resourceArmStorageBlobUpdate, - Exists: resourceArmStorageBlobExists, - Delete: resourceArmStorageBlobDelete, + Create: resourceArmStorageBlobCreate, + Read: resourceArmStorageBlobRead, + Update: resourceArmStorageBlobUpdate, + Delete: resourceArmStorageBlobDelete, + MigrateState: resourceStorageBlobMigrateState, + SchemaVersion: 1, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -127,8 +133,8 @@ func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors [] func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) { value := strings.ToLower(v.(string)) validTypes := map[string]struct{}{ - "block": struct{}{}, - "page": struct{}{}, + "block": {}, + "page": {}, } if _, ok := validTypes[value]; !ok { @@ -140,6 +146,7 @@ func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors [] func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) ctx := armClient.StopContext + env := armClient.environment resourceGroupName := d.Get("resource_group_name").(string) storageAccountName := d.Get("storage_account_name").(string) @@ -154,15 +161,16 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro name := d.Get("name").(string) blobType := d.Get("type").(string) - cont := d.Get("storage_container_name").(string) + containerName := d.Get("storage_container_name").(string) sourceUri := d.Get("source_uri").(string) contentType := d.Get("content_type").(string) - log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName) + log.Printf("[INFO] Creating blob %q in container %q within storage account %q", name, containerName, storageAccountName) + container := blobClient.GetContainerReference(containerName) + blob := container.GetBlobReference(name) + if sourceUri != "" { options := &storage.CopyOptions{} - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) err := blob.Copy(sourceUri, options) if err != nil { return fmt.Errorf("Error creating storage blob on Azure: %s", err) @@ -171,8 +179,6 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro switch strings.ToLower(blobType) { case "block": options := &storage.PutBlobOptions{} - container 
:= blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) err := blob.CreateBlockBlob(options) if err != nil { return fmt.Errorf("Error creating storage blob on Azure: %s", err) @@ -182,7 +188,8 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro if source != "" { parallelism := d.Get("parallelism").(int) attempts := d.Get("attempts").(int) - if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, contentType, blobClient, parallelism, attempts); err != nil { + + if err := resourceArmStorageBlobBlockUploadFromSource(containerName, name, source, contentType, blobClient, parallelism, attempts); err != nil { return fmt.Errorf("Error creating storage blob on Azure: %s", err) } } @@ -191,15 +198,14 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro if source != "" { parallelism := d.Get("parallelism").(int) attempts := d.Get("attempts").(int) - if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, contentType, blobClient, parallelism, attempts); err != nil { + + if err := resourceArmStorageBlobPageUploadFromSource(containerName, name, source, contentType, blobClient, parallelism, attempts); err != nil { return fmt.Errorf("Error creating storage blob on Azure: %s", err) } } else { size := int64(d.Get("size").(int)) options := &storage.PutBlobOptions{} - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) blob.Properties.ContentLength = size blob.Properties.ContentType = contentType err := blob.PutPageBlob(options) @@ -210,7 +216,9 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro } } - d.SetId(name) + // gives us https://example.blob.core.windows.net/container/file.vhd + id := fmt.Sprintf("https://%s.blob.%s/%s/%s", storageAccountName, env.StorageEndpointSuffix, containerName, name) + d.SetId(id) return resourceArmStorageBlobRead(d, meta) } @@ -539,22 +547,30 @@ func resourceArmStorageBlobUpdate(d *schema.ResourceData, meta interface{}) erro armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageBlobID(d.Id(), armClient.environment) + if err != nil { + return err + } - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) + if err != nil { + return err + } + + if resourceGroup == nil { + return fmt.Errorf("Unable to determine Resource Group for Storage Account %q", id.storageAccountName) + } + + blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { - return fmt.Errorf("Error getting storage account %s: %+v", storageAccountName, err) + return fmt.Errorf("Error getting storage account %s: %+v", id.storageAccountName, err) } if !accountExists { - return fmt.Errorf("Storage account %s not found in resource group %s", storageAccountName, resourceGroupName) + return fmt.Errorf("Storage account %s not found in resource group %s", id.storageAccountName, *resourceGroup) } - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) - - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) + container := 
blobClient.GetContainerReference(id.containerName) + blob := container.GetBlobReference(id.blobName) if d.HasChange("content_type") { blob.Properties.ContentType = d.Get("content_type").(string) @@ -563,7 +579,7 @@ func resourceArmStorageBlobUpdate(d *schema.ResourceData, meta interface{}) erro options := &storage.SetBlobPropertiesOptions{} err = blob.SetProperties(options) if err != nil { - return fmt.Errorf("Error setting properties of blob %s (container %s, storage account %s): %+v", name, storageContainerName, storageAccountName, err) + return fmt.Errorf("Error setting properties of blob %s (container %s, storage account %s): %+v", id.blobName, id.containerName, id.storageAccountName, err) } return nil @@ -573,115 +589,162 @@ func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageBlobID(d.Id(), armClient.environment) + if err != nil { + return err + } - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) + if err != nil { + return err + } + + if resourceGroup == nil { + return fmt.Errorf("Unable to determine Resource Group for Storage Account %q", id.storageAccountName) + } + + blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { return err } if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id()) + log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", id.storageAccountName, d.Id()) d.SetId("") return nil } - exists, err := resourceArmStorageBlobExists(d, meta) + log.Printf("[INFO] Checking for existence of storage blob %q in container %q.", id.blobName, id.containerName) + container := blobClient.GetContainerReference(id.containerName) + blob := container.GetBlobReference(id.blobName) + exists, err := blob.Exists() if err != nil { - return err + return fmt.Errorf("error checking for existence of storage blob %q: %s", id.blobName, err) } if !exists { - // Exists already removed this from state + log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", id.blobName) + d.SetId("") return nil } - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) - - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - options := &storage.GetBlobPropertiesOptions{} err = blob.GetProperties(options) if err != nil { - return fmt.Errorf("Error getting properties of blob %s (container %s, storage account %s): %+v", name, storageContainerName, storageAccountName, err) + return fmt.Errorf("Error getting properties of blob %s (container %s, storage account %s): %+v", id.blobName, id.containerName, id.storageAccountName, err) } + + d.Set("name", id.blobName) + d.Set("storage_container_name", id.containerName) + d.Set("storage_account_name", id.storageAccountName) + d.Set("resource_group_name", resourceGroup) + d.Set("content_type", blob.Properties.ContentType) + d.Set("source_uri", blob.Properties.CopySource) + + blobType := strings.ToLower(strings.Replace(string(blob.Properties.BlobType), "Blob", "", 1)) 
+ d.Set("type", blobType) + url := blob.GetURL() if url == "" { - log.Printf("[INFO] URL for %q is empty", name) + log.Printf("[INFO] URL for %q is empty", id.blobName) } d.Set("url", url) return nil } -func resourceArmStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) { +func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageBlobID(d.Id(), armClient.environment) + if err != nil { + return err + } - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) if err != nil { - return false, err + return fmt.Errorf("Unable to determine Resource Group for Storage Account %q: %+v", id.storageAccountName, err) } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id()) - d.SetId("") - return false, nil + if resourceGroup == nil { + log.Printf("[INFO] Resource Group doesn't exist so the blob won't exist") + return nil } - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) - - log.Printf("[INFO] Checking for existence of storage blob %q.", name) - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - exists, err := blob.Exists() + blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { - return false, fmt.Errorf("error testing existence of storage blob %q: %s", name, err) + return err + } + if !accountExists { + log.Printf("[INFO] Storage Account %q doesn't exist so the blob won't exist", id.storageAccountName) + return nil } - if !exists { - log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", name) - d.SetId("") + log.Printf("[INFO] Deleting storage blob %q", id.blobName) + options := &storage.DeleteBlobOptions{} + container := blobClient.GetContainerReference(id.containerName) + blob := container.GetBlobReference(id.blobName) + _, err = blob.DeleteIfExists(options) + if err != nil { + return fmt.Errorf("Error deleting storage blob %q: %s", id.blobName, err) } - return exists, nil + return nil } -func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - ctx := armClient.StopContext - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) +type storageBlobId struct { + storageAccountName string + containerName string + blobName string +} - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) +func parseStorageBlobID(input string, environment azure.Environment) (*storageBlobId, error) { + uri, err := url.Parse(input) if err != nil { - return err + return nil, fmt.Errorf("Error parsing %q as URI: %+v", input, err) } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the blob won't exist", storageAccountName) - return nil + + // trim the leading `/` + segments := strings.Split(strings.TrimPrefix(uri.Path, "/"), "/") + if len(segments) < 2 { + return nil, 
fmt.Errorf("Expected number of segments in the path to be < 2 but got %d", len(segments)) } - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) + storageAccountName := strings.Replace(uri.Host, fmt.Sprintf(".blob.%s", environment.StorageEndpointSuffix), "", 1) + containerName := segments[0] + blobName := strings.TrimPrefix(uri.Path, fmt.Sprintf("/%s/", containerName)) - log.Printf("[INFO] Deleting storage blob %q", name) - options := &storage.DeleteBlobOptions{} - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - _, err = blob.DeleteIfExists(options) + id := storageBlobId{ + storageAccountName: storageAccountName, + containerName: containerName, + blobName: blobName, + } + return &id, nil +} + +func determineResourceGroupForStorageAccount(accountName string, client *ArmClient) (*string, error) { + storageClient := client.storageServiceClient + ctx := client.StopContext + + // first locate which resource group the storage account is in + groupsResp, err := storageClient.List(ctx) if err != nil { - return fmt.Errorf("Error deleting storage blob %q: %s", name, err) + return nil, fmt.Errorf("Error loading the Resource Groups for Storage Account %q: %+v", accountName, err) } - d.SetId("") - return nil + if groups := groupsResp.Value; groups != nil { + for _, group := range *groups { + if group.Name != nil && *group.Name == accountName { + groupId, err := parseAzureResourceID(*group.ID) + if err != nil { + return nil, err + } + + return &groupId.ResourceGroup, nil + } + } + } + + return nil, nil } diff --git a/azurerm/resource_arm_storage_blob_migration.go b/azurerm/resource_arm_storage_blob_migration.go new file mode 100644 index 0000000000000..f0e643a58ad81 --- /dev/null +++ b/azurerm/resource_arm_storage_blob_migration.go @@ -0,0 +1,40 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceStorageBlobMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AzureRM Storage Blob State v0; migrating to v1") + return migrateStorageBlobStateV0toV1(is, meta) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStorageBlobStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] ARM Storage Blob Attributes before Migration: %#v", is.Attributes) + + environment := meta.(*ArmClient).environment + + blobName := is.Attributes["name"] + containerName := is.Attributes["storage_container_name"] + storageAccountName := is.Attributes["storage_account_name"] + newID := fmt.Sprintf("https://%s.blob.%s/%s/%s", storageAccountName, environment.StorageEndpointSuffix, containerName, blobName) + is.Attributes["id"] = newID + is.ID = newID + + log.Printf("[DEBUG] ARM Storage Blob Attributes after State Migration: %#v", is.Attributes) + + return is, nil +} diff --git a/azurerm/resource_arm_storage_blob_migration_test.go b/azurerm/resource_arm_storage_blob_migration_test.go new file mode 100644 index 0000000000000..bcb5b4f4c8292 --- /dev/null +++ b/azurerm/resource_arm_storage_blob_migration_test.go @@ -0,0 +1,67 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +// NOTE: this is intentionally an 
acceptance test (and we're not explicitly setting the env) +// as we want to run this depending on the cloud we're in. +func TestAccAzureRMStorageBlobMigrateState(t *testing.T) { + config := testGetAzureConfig(t) + if config == nil { + t.SkipNow() + return + } + + client, err := getArmClient(config) + if err != nil { + t.Fatal(fmt.Errorf("Error building ARM Client: %+v", err)) + return + } + + client.StopContext = testAccProvider.StopContext() + + suffix := client.environment.StorageEndpointSuffix + + cases := map[string]struct { + StateVersion int + ID string + InputAttributes map[string]string + ExpectedAttributes map[string]string + }{ + "v0_1_without_value": { + StateVersion: 0, + ID: "some_id", + InputAttributes: map[string]string{ + "name": "blob.vhd", + "storage_container_name": "container", + "storage_account_name": "example", + }, + ExpectedAttributes: map[string]string{ + "id": fmt.Sprintf("https://example.blob.%s/container/blob.vhd", suffix), + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.InputAttributes, + } + is, err := resourceStorageBlobMigrateState(tc.StateVersion, is, client) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.ExpectedAttributes { + actual := is.Attributes[k] + if actual != v { + t.Fatalf("Bad Storage Blob Migrate for %q: %q\n\n expected: %q", k, actual, v) + } + } + } +} diff --git a/azurerm/resource_arm_storage_blob_test.go b/azurerm/resource_arm_storage_blob_test.go index 116a70fa7cece..324f8179f55d4 100644 --- a/azurerm/resource_arm_storage_blob_test.go +++ b/azurerm/resource_arm_storage_blob_test.go @@ -144,6 +144,7 @@ func TestResourceAzureRMStorageBlobAttempts_validation(t *testing.T) { } func TestAccAzureRMStorageBlob_basic(t *testing.T) { + resourceName := "azurerm_storage_blob.test" ri := acctest.RandInt() rs := strings.ToLower(acctest.RandString(11)) config := testAccAzureRMStorageBlob_basic(ri, rs, testLocation()) @@ -156,14 +157,21 @@ func TestAccAzureRMStorageBlob_basic(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobExists("azurerm_storage_blob.test"), + testCheckAzureRMStorageBlobExists(resourceName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"attempts", "parallelism", "size", "type"}, + }, }, }) } func TestAccAzureRMStorageBlob_disappears(t *testing.T) { + resourceName := "azurerm_storage_blob.test" ri := acctest.RandInt() rs := strings.ToLower(acctest.RandString(11)) config := testAccAzureRMStorageBlob_basic(ri, rs, testLocation()) @@ -176,8 +184,8 @@ func TestAccAzureRMStorageBlob_disappears(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobExists("azurerm_storage_blob.test"), - testCheckAzureRMStorageBlobDisappears("azurerm_storage_blob.test"), + testCheckAzureRMStorageBlobExists(resourceName), + testCheckAzureRMStorageBlobDisappears(resourceName), ), ExpectNonEmptyPlan: true, }, @@ -221,6 +229,7 @@ func TestAccAzureRMStorageBlobBlock_source(t *testing.T) { } func TestAccAzureRMStorageBlobPage_source(t *testing.T) { + resourceName := "azurerm_storage_blob.source" ri := acctest.RandInt() rs := strings.ToLower(acctest.RandString(11)) sourceBlob, err := ioutil.TempFile("", "") @@ -272,7 +281,7 @@ func TestAccAzureRMStorageBlobPage_source(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.source", storage.BlobTypePage, sourceBlob.Name()), + testCheckAzureRMStorageBlobMatchesFile(resourceName, storage.BlobTypePage, sourceBlob.Name()), ), }, }, @@ -280,6 +289,7 @@ func TestAccAzureRMStorageBlobPage_source(t *testing.T) { } func TestAccAzureRMStorageBlob_source_uri(t *testing.T) { + resourceName := "azurerm_storage_blob.destination" ri := acctest.RandInt() rs := strings.ToLower(acctest.RandString(11)) sourceBlob, err := ioutil.TempFile("", "") @@ -307,9 +317,15 @@ func TestAccAzureRMStorageBlob_source_uri(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.destination", storage.BlobTypeBlock, sourceBlob.Name()), + testCheckAzureRMStorageBlobMatchesFile(resourceName, storage.BlobTypeBlock, sourceBlob.Name()), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"attempts", "parallelism", "size", "type"}, + }, }, }) } @@ -683,31 +699,32 @@ resource "azurerm_storage_account" "source" { } resource "azurerm_storage_container" "source" { - name = "source" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - container_access_type = "blob" + name = "source" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + container_access_type = "blob" } resource "azurerm_storage_blob" "source" { - name = "source.vhd" + name = "source.vhd" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - storage_container_name = "${azurerm_storage_container.source.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + storage_container_name = "${azurerm_storage_container.source.name}" - type = "block" - source = "%s" - parallelism = 4 - attempts = 2 + type = "block" + source = "%s" + parallelism = 4 + attempts = 2 } resource "azurerm_storage_blob" "destination" { - name = "destination.vhd" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - storage_container_name = "${azurerm_storage_container.source.name}" - source_uri = "${azurerm_storage_blob.source.url}" + name = "destination.vhd" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + storage_container_name = "${azurerm_storage_container.source.name}" + source_uri = "${azurerm_storage_blob.source.url}" + type = "block" } `, rInt, location, rString, sourceBlobName) } diff --git a/azurerm/resource_arm_storage_container.go b/azurerm/resource_arm_storage_container.go index c028f69dcac1e..2fcd0b394e3b8 100644 --- a/azurerm/resource_arm_storage_container.go +++ b/azurerm/resource_arm_storage_container.go @@ -3,22 +3,27 @@ package azurerm import ( "fmt" "log" + "net/url" + "regexp" "strings" "time" - "regexp" - "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" ) func resourceArmStorageContainer() *schema.Resource { return &schema.Resource{ - Create: resourceArmStorageContainerCreate, - Read: resourceArmStorageContainerRead, - Exists: 
resourceArmStorageContainerExists, - Delete: resourceArmStorageContainerDelete, + Create: resourceArmStorageContainerCreate, + Read: resourceArmStorageContainerRead, + Delete: resourceArmStorageContainerDelete, + MigrateState: resourceStorageContainerMigrateState, + SchemaVersion: 1, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -40,6 +45,7 @@ func resourceArmStorageContainer() *schema.Resource { Default: "private", ValidateFunc: validateArmStorageContainerAccessType, }, + "properties": { Type: schema.TypeMap, Computed: true, @@ -85,6 +91,7 @@ func resourceArmStorageContainerCreate(d *schema.ResourceData, meta interface{}) armClient := meta.(*ArmClient) ctx := armClient.StopContext + name := d.Get("name").(string) resourceGroupName := d.Get("resource_group_name").(string) storageAccountName := d.Get("storage_account_name").(string) @@ -96,8 +103,6 @@ func resourceArmStorageContainerCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Storage Account %q Not Found", storageAccountName) } - name := d.Get("name").(string) - var accessType storage.ContainerAccessType if d.Get("container_access_type").(string) == "private" { accessType = storage.ContainerAccessType("") @@ -122,105 +127,87 @@ func resourceArmStorageContainerCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting permissions for container %s in storage account %s: %+v", name, storageAccountName, err) } - d.SetId(name) + id := fmt.Sprintf("https://%s.blob.%s/%s", storageAccountName, armClient.environment.StorageEndpointSuffix, name) + d.SetId(id) return resourceArmStorageContainerRead(d, meta) } -func checkContainerIsCreated(reference *storage.Container) func() *resource.RetryError { - return func() *resource.RetryError { - createOptions := &storage.CreateContainerOptions{} - _, err := reference.CreateIfNotExists(createOptions) - if err != nil { - return resource.RetryableError(err) - } - - return nil - } -} - // resourceAzureStorageContainerRead does all the necessary API calls to // read the status of the storage container off Azure. 
func resourceArmStorageContainerRead(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageContainerID(d.Id(), armClient.environment) + if err != nil { + return err + } - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) + if err != nil { + return err + } + if resourceGroup == nil { + log.Printf("Cannot locate Resource Group for Storage Account %q (presuming it's gone) - removing from state", id.storageAccountName) + d.SetId("") + return nil + } + + blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { return err } if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing container %q from state", storageAccountName, d.Id()) + log.Printf("[DEBUG] Storage account %q not found, removing container %q from state", id.storageAccountName, d.Id()) d.SetId("") return nil } - name := d.Get("name").(string) containers, err := blobClient.ListContainers(storage.ListContainersParameters{ - Prefix: name, + Prefix: id.containerName, Timeout: 90, }) if err != nil { - return fmt.Errorf("Failed to retrieve storage containers in account %q: %s", name, err) + return fmt.Errorf("Failed to retrieve storage containers in account %q: %s", id.containerName, err) } - var found bool + var container *storage.Container for _, cont := range containers.Containers { - if cont.Name == name { - found = true - - props := make(map[string]interface{}) - props["last_modified"] = cont.Properties.LastModified - props["lease_status"] = cont.Properties.LeaseStatus - props["lease_state"] = cont.Properties.LeaseState - props["lease_duration"] = cont.Properties.LeaseDuration - - d.Set("properties", props) + if cont.Name == id.containerName { + container = &cont + break } } - if !found { - log.Printf("[INFO] Storage container %q does not exist in account %q, removing from state...", name, storageAccountName) + if container == nil { + log.Printf("[INFO] Storage container %q does not exist in account %q, removing from state...", id.containerName, id.storageAccountName) d.SetId("") + return nil } - return nil -} - -func resourceArmStorageContainerExists(d *schema.ResourceData, meta interface{}) (bool, error) { - armClient := meta.(*ArmClient) - ctx := armClient.StopContext - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + d.Set("name", id.containerName) + d.Set("storage_account_name", id.storageAccountName) + d.Set("resource_group_name", resourceGroup) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) - if err != nil { - return false, err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing container %q from state", storageAccountName, d.Id()) - d.SetId("") - return false, nil + // for historical reasons, "private" above is an empty string in the API + if container.Properties.PublicAccess == storage.ContainerAccessTypePrivate { + d.Set("container_access_type", "private") + } else { + d.Set("container_access_type", string(container.Properties.PublicAccess)) } - name := 
d.Get("name").(string) + output := make(map[string]interface{}) - log.Printf("[INFO] Checking existence of storage container %q in storage account %q", name, storageAccountName) - reference := blobClient.GetContainerReference(name) - exists, err := reference.Exists() - if err != nil { - return false, fmt.Errorf("Error querying existence of storage container %q in storage account %q: %s", name, storageAccountName, err) - } + output["last_modified"] = container.Properties.LastModified + output["lease_status"] = container.Properties.LeaseStatus + output["lease_state"] = container.Properties.LeaseState + output["lease_duration"] = container.Properties.LeaseDuration - if !exists { - log.Printf("[INFO] Storage container %q does not exist in account %q, removing from state...", name, storageAccountName) - d.SetId("") + if err := d.Set("properties", output); err != nil { + return fmt.Errorf("Error flattening `properties`: %+v", err) } - return exists, nil + return nil } // resourceAzureStorageContainerDelete does all the necessary API calls to @@ -229,27 +216,74 @@ func resourceArmStorageContainerDelete(d *schema.ResourceData, meta interface{}) armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageContainerID(d.Id(), armClient.environment) + if err != nil { + return err + } - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) if err != nil { return err } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the container won't exist", storageAccountName) + if resourceGroup == nil { + log.Printf("Cannot locate Resource Group for Storage Account %q (presuming it's gone) - removing from state", id.storageAccountName) return nil } - name := d.Get("name").(string) + blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) + if err != nil { + return err + } + if !accountExists { + log.Printf("[INFO] Storage Account %q doesn't exist so the container won't exist", id.storageAccountName) + return nil + } - log.Printf("[INFO] Deleting storage container %q in account %q", name, storageAccountName) - reference := blobClient.GetContainerReference(name) + log.Printf("[INFO] Deleting storage container %q in account %q", id.containerName, id.storageAccountName) + reference := blobClient.GetContainerReference(id.containerName) deleteOptions := &storage.DeleteContainerOptions{} if _, err := reference.DeleteIfExists(deleteOptions); err != nil { - return fmt.Errorf("Error deleting storage container %q from storage account %q: %s", name, storageAccountName, err) + return fmt.Errorf("Error deleting storage container %q from storage account %q: %s", id.containerName, id.storageAccountName, err) } - d.SetId("") return nil } + +func checkContainerIsCreated(reference *storage.Container) func() *resource.RetryError { + return func() *resource.RetryError { + createOptions := &storage.CreateContainerOptions{} + _, err := reference.CreateIfNotExists(createOptions) + if err != nil { + return resource.RetryableError(err) + } + + return nil + } +} + +type storageContainerId struct { + storageAccountName string + containerName string +} + +func parseStorageContainerID(input string, environment azure.Environment) 
(*storageContainerId, error) { + uri, err := url.Parse(input) + if err != nil { + return nil, fmt.Errorf("Error parsing %q as URI: %+v", input, err) + } + + // remove the leading `/` + segments := strings.Split(strings.TrimPrefix(uri.Path, "/"), "/") + if len(segments) < 1 { + return nil, fmt.Errorf("Expected number of segments in the path to be < 1 but got %d", len(segments)) + } + + storageAccountName := strings.Replace(uri.Host, fmt.Sprintf(".blob.%s", environment.StorageEndpointSuffix), "", 1) + containerName := segments[0] + + id := storageContainerId{ + storageAccountName: storageAccountName, + containerName: containerName, + } + return &id, nil +} diff --git a/azurerm/resource_arm_storage_container_migration.go b/azurerm/resource_arm_storage_container_migration.go new file mode 100644 index 0000000000000..c8ed4d5a7cd99 --- /dev/null +++ b/azurerm/resource_arm_storage_container_migration.go @@ -0,0 +1,39 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceStorageContainerMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AzureRM Storage Container State v0; migrating to v1") + return migrateStorageContainerStateV0toV1(is, meta) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStorageContainerStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] ARM Storage Container Attributes before Migration: %#v", is.Attributes) + + environment := meta.(*ArmClient).environment + + containerName := is.Attributes["name"] + storageAccountName := is.Attributes["storage_account_name"] + newID := fmt.Sprintf("https://%s.blob.%s/%s", storageAccountName, environment.StorageEndpointSuffix, containerName) + is.Attributes["id"] = newID + is.ID = newID + + log.Printf("[DEBUG] ARM Storage Container Attributes after State Migration: %#v", is.Attributes) + + return is, nil +} diff --git a/azurerm/resource_arm_storage_container_migration_test.go b/azurerm/resource_arm_storage_container_migration_test.go new file mode 100644 index 0000000000000..ce1eb4658b230 --- /dev/null +++ b/azurerm/resource_arm_storage_container_migration_test.go @@ -0,0 +1,66 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +// NOTE: this is intentionally an acceptance test (and we're not explicitly setting the env) +// as we want to run this depending on the cloud we're in. 
+func TestAccAzureRMStorageContainerMigrateState(t *testing.T) { + config := testGetAzureConfig(t) + if config == nil { + t.SkipNow() + return + } + + client, err := getArmClient(config) + if err != nil { + t.Fatal(fmt.Errorf("Error building ARM Client: %+v", err)) + return + } + + client.StopContext = testAccProvider.StopContext() + + suffix := client.environment.StorageEndpointSuffix + + cases := map[string]struct { + StateVersion int + ID string + InputAttributes map[string]string + ExpectedAttributes map[string]string + }{ + "v0_1_without_value": { + StateVersion: 0, + ID: "some_id", + InputAttributes: map[string]string{ + "name": "container", + "storage_account_name": "example", + }, + ExpectedAttributes: map[string]string{ + "id": fmt.Sprintf("https://example.blob.%s/container", suffix), + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.InputAttributes, + } + is, err := resourceStorageContainerMigrateState(tc.StateVersion, is, client) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.ExpectedAttributes { + actual := is.Attributes[k] + if actual != v { + t.Fatalf("Bad Storage Container Migrate for %q: %q\n\n expected: %q", k, actual, v) + } + } + } +} diff --git a/azurerm/resource_arm_storage_container_test.go b/azurerm/resource_arm_storage_container_test.go index b844000ada76b..8b841cd14a646 100644 --- a/azurerm/resource_arm_storage_container_test.go +++ b/azurerm/resource_arm_storage_container_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccAzureRMStorageContainer_basic(t *testing.T) { + resourceName := "azurerm_storage_container.test" var c storage.Container ri := acctest.RandInt() @@ -27,9 +28,14 @@ func TestAccAzureRMStorageContainer_basic(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageContainerExists("azurerm_storage_container.test", &c), + testCheckAzureRMStorageContainerExists(resourceName, &c), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -59,6 +65,7 @@ func TestAccAzureRMStorageContainer_disappears(t *testing.T) { } func TestAccAzureRMStorageContainer_root(t *testing.T) { + resourceName := "azurerm_storage_container.test" var c storage.Container ri := acctest.RandInt() @@ -73,10 +80,15 @@ func TestAccAzureRMStorageContainer_root(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageContainerExists("azurerm_storage_container.test", &c), - resource.TestCheckResourceAttr("azurerm_storage_container.test", "name", "$root"), + testCheckAzureRMStorageContainerExists(resourceName, &c), + resource.TestCheckResourceAttr(resourceName, "name", "$root"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } diff --git a/azurerm/resource_arm_storage_queue.go b/azurerm/resource_arm_storage_queue.go index d6c58ae9a2073..8f3bcf9cf3fd5 100644 --- a/azurerm/resource_arm_storage_queue.go +++ b/azurerm/resource_arm_storage_queue.go @@ -3,7 +3,9 @@ package azurerm import ( "fmt" "log" + "net/url" "regexp" + "strings" "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/schema" @@ -13,8 +15,12 @@ func resourceArmStorageQueue() *schema.Resource { return &schema.Resource{ Create: resourceArmStorageQueueCreate, Read: resourceArmStorageQueueRead, - Exists: resourceArmStorageQueueExists, Delete: resourceArmStorageQueueDelete, + Importer: &schema.ResourceImporter{ + State: 
schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + MigrateState: resourceStorageQueueMigrateState, Schema: map[string]*schema.Schema{ "name": { @@ -65,7 +71,9 @@ func validateArmStorageQueueName(v interface{}, k string) (ws []string, errors [ func resourceArmStorageQueueCreate(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) ctx := armClient.StopContext + environment := armClient.environment + name := d.Get("name").(string) resourceGroupName := d.Get("resource_group_name").(string) storageAccountName := d.Get("storage_account_name").(string) @@ -77,8 +85,6 @@ func resourceArmStorageQueueCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Storage Account %q Not Found", storageAccountName) } - name := d.Get("name").(string) - log.Printf("[INFO] Creating queue %q in storage account %q", name, storageAccountName) queueReference := queueClient.GetQueueReference(name) options := &storage.QueueServiceOptions{} @@ -87,84 +93,122 @@ func resourceArmStorageQueueCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error creating storage queue on Azure: %s", err) } - d.SetId(name) + id := fmt.Sprintf("https://%s.queue.%s/%s", storageAccountName, environment.StorageEndpointSuffix, name) + d.SetId(id) return resourceArmStorageQueueRead(d, meta) } func resourceArmStorageQueueRead(d *schema.ResourceData, meta interface{}) error { + armClient := meta.(*ArmClient) + ctx := armClient.StopContext - exists, err := resourceArmStorageQueueExists(d, meta) + id, err := parseStorageQueueID(d.Id()) if err != nil { return err } - if !exists { - // Exists already removed this from state - return nil + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) + if err != nil { + return err } - return nil -} - -func resourceArmStorageQueueExists(d *schema.ResourceData, meta interface{}) (bool, error) { - armClient := meta.(*ArmClient) - ctx := armClient.StopContext - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + if resourceGroup == nil { + log.Printf("[WARN] Unable to determine Resource Group for Storage Account %q (assuming removed) - removing from state", id.storageAccountName) + d.SetId("") + return nil + } - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { - return false, err + return err } if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing queue %q from state", storageAccountName, d.Id()) + log.Printf("[DEBUG] Storage account %q not found, removing queue %q from state", id.storageAccountName, id.queueName) d.SetId("") - return false, nil + return nil } - name := d.Get("name").(string) - - log.Printf("[INFO] Checking for existence of storage queue %q.", name) - queueReference := queueClient.GetQueueReference(name) + log.Printf("[INFO] Checking for existence of storage queue %q.", id.queueName) + queueReference := queueClient.GetQueueReference(id.queueName) exists, err := queueReference.Exists() if err != nil { - return false, fmt.Errorf("error testing existence of storage queue %q: %s", name, err) + return fmt.Errorf("error checking if storage queue %q exists: %s", id.queueName, err) } if !exists { - log.Printf("[INFO] Storage queue %q no longer exists, removing from 
state...", name) + log.Printf("[INFO] Storage queue %q no longer exists, removing from state...", id.queueName) d.SetId("") + return nil } - return exists, nil + d.Set("name", id.queueName) + d.Set("storage_account_name", id.storageAccountName) + d.Set("resource_group_name", *resourceGroup) + + return nil } func resourceArmStorageQueueDelete(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageQueueID(d.Id()) + if err != nil { + return err + } - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) if err != nil { return err } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the blob won't exist", storageAccountName) + + if resourceGroup == nil { + log.Printf("[WARN] Unable to determine Resource Group for Storage Account %q (assuming removed) - removing from state", id.storageAccountName) return nil } - name := d.Get("name").(string) + queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) + if err != nil { + return err + } + if !accountExists { + log.Printf("[INFO]Storage Account %q doesn't exist so the blob won't exist", id.storageAccountName) + return nil + } - log.Printf("[INFO] Deleting storage queue %q", name) - queueReference := queueClient.GetQueueReference(name) + log.Printf("[INFO] Deleting storage queue %q", id.queueName) + queueReference := queueClient.GetQueueReference(id.queueName) options := &storage.QueueServiceOptions{} if err = queueReference.Delete(options); err != nil { - return fmt.Errorf("Error deleting storage queue %q: %s", name, err) + return fmt.Errorf("Error deleting storage queue %q: %s", id.queueName, err) } - d.SetId("") return nil } + +type storageQueueId struct { + storageAccountName string + queueName string +} + +func parseStorageQueueID(input string) (*storageQueueId, error) { + // https://myaccount.queue.core.windows.net/myqueue + uri, err := url.Parse(input) + if err != nil { + return nil, fmt.Errorf("Error parsing %q as a URI: %+v", input, err) + } + + segments := strings.Split(uri.Host, ".") + if len(segments) > 0 { + storageAccountName := segments[0] + // remove the leading `/` + queue := strings.TrimPrefix(uri.Path, "/") + id := storageQueueId{ + storageAccountName: storageAccountName, + queueName: queue, + } + return &id, nil + } + + return nil, nil +} diff --git a/azurerm/resource_arm_storage_queue_migration.go b/azurerm/resource_arm_storage_queue_migration.go new file mode 100644 index 0000000000000..db83cb1b5f7df --- /dev/null +++ b/azurerm/resource_arm_storage_queue_migration.go @@ -0,0 +1,40 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceStorageQueueMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AzureRM Storage Queue State v0; migrating to v1") + return migrateStorageQueueStateV0toV1(is, meta) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStorageQueueStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if 
is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] ARM Storage Queue Attributes before Migration: %#v", is.Attributes) + + environment := meta.(*ArmClient).environment + + queueName := is.Attributes["name"] + storageAccountName := is.Attributes["storage_account_name"] + newID := fmt.Sprintf("https://%s.queue.%s/%s", storageAccountName, environment.StorageEndpointSuffix, queueName) + is.Attributes["id"] = newID + is.ID = newID + + log.Printf("[DEBUG] ARM Storage Queue Attributes after State Migration: %#v", is.Attributes) + + return is, nil +} diff --git a/azurerm/resource_arm_storage_queue_migration_test.go b/azurerm/resource_arm_storage_queue_migration_test.go new file mode 100644 index 0000000000000..5399acef9d5f7 --- /dev/null +++ b/azurerm/resource_arm_storage_queue_migration_test.go @@ -0,0 +1,66 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +// NOTE: this is intentionally an acceptance test (and we're not explicitly setting the env) +// as we want to run this depending on the cloud we're in. +func TestAccAzureRMStorageQueueMigrateState(t *testing.T) { + config := testGetAzureConfig(t) + if config == nil { + t.SkipNow() + return + } + + client, err := getArmClient(config) + if err != nil { + t.Fatal(fmt.Errorf("Error building ARM Client: %+v", err)) + return + } + + client.StopContext = testAccProvider.StopContext() + + suffix := client.environment.StorageEndpointSuffix + + cases := map[string]struct { + StateVersion int + ID string + InputAttributes map[string]string + ExpectedAttributes map[string]string + }{ + "v0_1_without_value": { + StateVersion: 0, + ID: "some_id", + InputAttributes: map[string]string{ + "name": "queue", + "storage_account_name": "example", + }, + ExpectedAttributes: map[string]string{ + "id": fmt.Sprintf("https://example.queue.%s/queue", suffix), + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.InputAttributes, + } + is, err := resourceStorageQueueMigrateState(tc.StateVersion, is, client) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.ExpectedAttributes { + actual := is.Attributes[k] + if actual != v { + t.Fatalf("Bad Storage Queue Migrate for %q: %q\n\n expected: %q", k, actual, v) + } + } + } +} diff --git a/azurerm/resource_arm_storage_queue_test.go b/azurerm/resource_arm_storage_queue_test.go index 223835c15814b..caec84139db03 100644 --- a/azurerm/resource_arm_storage_queue_test.go +++ b/azurerm/resource_arm_storage_queue_test.go @@ -51,6 +51,7 @@ func TestResourceAzureRMStorageQueueName_Validation(t *testing.T) { } func TestAccAzureRMStorageQueue_basic(t *testing.T) { + resourceName := "azurerm_storage_queue.test" ri := acctest.RandInt() rs := strings.ToLower(acctest.RandString(11)) config := testAccAzureRMStorageQueue_basic(ri, rs, testLocation()) @@ -63,9 +64,14 @@ func TestAccAzureRMStorageQueue_basic(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageQueueExists("azurerm_storage_queue.test"), + testCheckAzureRMStorageQueueExists(resourceName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } diff --git a/azurerm/resource_arm_storage_table.go b/azurerm/resource_arm_storage_table.go index e5cc2e14e10ab..63ab4fdc7d048 100644 --- a/azurerm/resource_arm_storage_table.go +++ b/azurerm/resource_arm_storage_table.go @@ -3,7 +3,9 @@ 
package azurerm import ( "fmt" "log" + "net/url" "regexp" + "strings" "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/schema" @@ -14,6 +16,11 @@ func resourceArmStorageTable() *schema.Resource { Create: resourceArmStorageTableCreate, Read: resourceArmStorageTableRead, Delete: resourceArmStorageTableDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + MigrateState: resourceStorageTableMigrateState, Schema: map[string]*schema.Schema{ "name": { @@ -51,7 +58,9 @@ func validateArmStorageTableName(v interface{}, k string) (ws []string, errors [ func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) ctx := armClient.StopContext + environment := armClient.environment + name := d.Get("name").(string) resourceGroupName := d.Get("resource_group_name").(string) storageAccountName := d.Get("storage_account_name").(string) @@ -63,7 +72,6 @@ func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Storage Account %q Not Found", storageAccountName) } - name := d.Get("name").(string) table := tableClient.GetTableReference(name) log.Printf("[INFO] Creating table %q in storage account %q.", name, storageAccountName) @@ -75,8 +83,8 @@ func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error creating table %q in storage account %q: %s", name, storageAccountName, err) } - d.SetId(name) - + id := fmt.Sprintf("https://%s.table.%s/%s", storageAccountName, environment.StorageEndpointSuffix, name) + d.SetId(id) return resourceArmStorageTableRead(d, meta) } @@ -84,41 +92,58 @@ func resourceArmStorageTableRead(d *schema.ResourceData, meta interface{}) error armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageTableID(d.Id()) + if err != nil { + return err + } - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) + if err != nil { + return err + } + + if resourceGroup == nil { + log.Printf("Unable to determine Resource Group for Storage Account %q (assuming removed)", id.storageAccountName) + d.SetId("") + return nil + } + + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { return err } + if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing table %q from state", storageAccountName, d.Id()) + log.Printf("[DEBUG] Storage account %q not found, removing table %q from state", id.storageAccountName, id.tableName) d.SetId("") return nil } - name := d.Get("name").(string) metaDataLevel := storage.MinimalMetadata options := &storage.QueryTablesOptions{} tables, err := tableClient.QueryTables(metaDataLevel, options) if err != nil { - return fmt.Errorf("Failed to retrieve storage tables in account %q: %s", name, err) + return fmt.Errorf("Failed to retrieve Tables in Storage Account %q: %s", id.storageAccountName, err) } - var found bool + var storageTable *storage.Table for _, table := range tables.Tables { - tableName := string(table.Name) - if tableName == name { - found = true - d.Set("name", tableName) + if string(table.Name) == id.tableName { +
storageTable = &table + break } } - if !found { - log.Printf("[INFO] Storage table %q does not exist in account %q, removing from state...", name, storageAccountName) + if storageTable == nil { + log.Printf("[INFO] Table %q does not exist in Storage Account %q, removing from state...", id.tableName, id.storageAccountName) d.SetId("") + return nil } + d.Set("name", id.tableName) + d.Set("storage_account_name", id.storageAccountName) + d.Set("resource_group_name", *resourceGroup) + return nil } @@ -126,28 +151,64 @@ func resourceArmStorageTableDelete(d *schema.ResourceData, meta interface{}) err armClient := meta.(*ArmClient) ctx := armClient.StopContext - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) + id, err := parseStorageTableID(d.Id()) + if err != nil { + return err + } - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(ctx, resourceGroupName, storageAccountName) + resourceGroup, err := determineResourceGroupForStorageAccount(id.storageAccountName, armClient) + if err != nil { + return err + } + + if resourceGroup == nil { + log.Printf("Unable to determine Resource Group for Storage Account %q (assuming removed)", id.storageAccountName) + return nil + } + + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(ctx, *resourceGroup, id.storageAccountName) if err != nil { return err } if !accountExists { - log.Printf("[INFO] Storage Account %q doesn't exist so the table won't exist", storageAccountName) + log.Printf("[INFO] Storage Account %q doesn't exist so the table won't exist", id.storageAccountName) return nil } - name := d.Get("name").(string) - table := tableClient.GetTableReference(name) + table := tableClient.GetTableReference(id.tableName) timeout := uint(60) options := &storage.TableOptions{} - log.Printf("[INFO] Deleting storage table %q in account %q", name, storageAccountName) + log.Printf("[INFO] Deleting Table %q in Storage Account %q", id.tableName, id.storageAccountName) if err := table.Delete(timeout, options); err != nil { - return fmt.Errorf("Error deleting storage table %q from storage account %q: %s", name, storageAccountName, err) + return fmt.Errorf("Error deleting table %q from Storage Account %q: %s", id.tableName, id.storageAccountName, err) } - d.SetId("") return nil } + +type storageTableId struct { + storageAccountName string + tableName string +} + +func parseStorageTableID(input string) (*storageTableId, error) { + // https://myaccount.table.core.windows.net/table1 + uri, err := url.Parse(input) + if err != nil { + return nil, fmt.Errorf("Error parsing %q as a URI: %+v", input, err) + } + + segments := strings.Split(uri.Host, ".") + if len(segments) > 0 { + storageAccountName := segments[0] + table := strings.Replace(uri.Path, "/", "", 1) + id := storageTableId{ + storageAccountName: storageAccountName, + tableName: table, + } + return &id, nil + } + + return nil, nil +} diff --git a/azurerm/resource_arm_storage_table_migration.go b/azurerm/resource_arm_storage_table_migration.go new file mode 100644 index 0000000000000..e2da0116cca7b --- /dev/null +++ b/azurerm/resource_arm_storage_table_migration.go @@ -0,0 +1,40 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceStorageTableMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AzureRM Storage Table
State v0; migrating to v1") + return migrateStorageTableStateV0toV1(is, meta) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStorageTableStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] ARM Storage Table Attributes before Migration: %#v", is.Attributes) + + environment := meta.(*ArmClient).environment + + tableName := is.Attributes["name"] + storageAccountName := is.Attributes["storage_account_name"] + newID := fmt.Sprintf("https://%s.table.%s/%s", storageAccountName, environment.StorageEndpointSuffix, tableName) + is.Attributes["id"] = newID + is.ID = newID + + log.Printf("[DEBUG] ARM Storage Table Attributes after State Migration: %#v", is.Attributes) + + return is, nil +} diff --git a/azurerm/resource_arm_storage_table_migration_test.go b/azurerm/resource_arm_storage_table_migration_test.go new file mode 100644 index 0000000000000..198468243c397 --- /dev/null +++ b/azurerm/resource_arm_storage_table_migration_test.go @@ -0,0 +1,66 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +// NOTE: this is intentionally an acceptance test (and we're not explicitly setting the env) +// as we want to run this depending on the cloud we're in. +func TestAccAzureRMStorageTableMigrateState(t *testing.T) { + config := testGetAzureConfig(t) + if config == nil { + t.SkipNow() + return + } + + client, err := getArmClient(config) + if err != nil { + t.Fatal(fmt.Errorf("Error building ARM Client: %+v", err)) + return + } + + client.StopContext = testAccProvider.StopContext() + + suffix := client.environment.StorageEndpointSuffix + + cases := map[string]struct { + StateVersion int + ID string + InputAttributes map[string]string + ExpectedAttributes map[string]string + }{ + "v0_1_without_value": { + StateVersion: 0, + ID: "some_id", + InputAttributes: map[string]string{ + "name": "table1", + "storage_account_name": "example", + }, + ExpectedAttributes: map[string]string{ + "id": fmt.Sprintf("https://example.table.%s/table1", suffix), + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.InputAttributes, + } + is, err := resourceStorageTableMigrateState(tc.StateVersion, is, client) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.ExpectedAttributes { + actual := is.Attributes[k] + if actual != v { + t.Fatalf("Bad Storage Table Migrate for %q: %q\n\n expected: %q", k, actual, v) + } + } + } +} diff --git a/azurerm/resource_arm_storage_table_test.go b/azurerm/resource_arm_storage_table_test.go index 983c3ff5edc82..4014787ef35fd 100644 --- a/azurerm/resource_arm_storage_table_test.go +++ b/azurerm/resource_arm_storage_table_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccAzureRMStorageTable_basic(t *testing.T) { + resourceName := "azurerm_storage_table.test" var table storage.Table ri := acctest.RandInt() @@ -27,9 +28,14 @@ func TestAccAzureRMStorageTable_basic(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageTableExists("azurerm_storage_table.test", &table), + testCheckAzureRMStorageTableExists(resourceName, &table), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } diff --git a/website/docs/r/storage_blob.html.markdown 
b/website/docs/r/storage_blob.html.markdown index 12c3d369ff09c..e0782c81c9de6 100644 --- a/website/docs/r/storage_blob.html.markdown +++ b/website/docs/r/storage_blob.html.markdown @@ -79,5 +79,13 @@ The following arguments are supported: The following attributes are exported in addition to the arguments listed above: -* `id` - The storage blob Resource ID. +* `id` - The ID of the Storage Blob. * `url` - The URL of the blob + +## Import + +Storage Blobs can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_storage_blob.blob1 https://example.blob.core.windows.net/container/blob.vhd +``` diff --git a/website/docs/r/storage_container.html.markdown b/website/docs/r/storage_container.html.markdown index 75646f7ca57b5..2ad592eb1899c 100644 --- a/website/docs/r/storage_container.html.markdown +++ b/website/docs/r/storage_container.html.markdown @@ -56,5 +56,13 @@ The following arguments are supported: The following attributes are exported in addition to the arguments listed above: -* `id` - The storage container Resource ID. +* `id` - The ID of the Storage Container. * `properties` - Key-value definition of additional properties associated to the storage container + +## Import + +Storage Containers can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_storage_container.container1 https://example.blob.core.windows.net/container +``` diff --git a/website/docs/r/storage_queue.html.markdown b/website/docs/r/storage_queue.html.markdown index a5619267a897d..dacadf36bdb9c 100644 --- a/website/docs/r/storage_queue.html.markdown +++ b/website/docs/r/storage_queue.html.markdown @@ -49,4 +49,12 @@ The following arguments are supported: The following attributes are exported in addition to the arguments listed above: -* `id` - The storage queue Resource ID. +* `id` - The ID of the Storage Queue. + +## Import + +Storage Queues can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_storage_queue.queue1 https://example.queue.core.windows.net/queue1 +``` diff --git a/website/docs/r/storage_table.html.markdown b/website/docs/r/storage_table.html.markdown index 4bc10fb20d35f..e383c0445febe 100644 --- a/website/docs/r/storage_table.html.markdown +++ b/website/docs/r/storage_table.html.markdown @@ -49,4 +49,12 @@ The following arguments are supported: The following attributes are exported in addition to the arguments listed above: -* `id` - The storage table Resource ID. +* `id` - The ID of the Storage Table. + +## Import + +Storage Tables can be imported using the `resource id`, e.g.
+ +```shell +terraform import azurerm_storage_table.table1 https://example.table.core.windows.net/table1 +``` From 73a67a1b507a169856f147de6c87536d773354a8 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 13:39:21 +0100 Subject: [PATCH 05/13] Updating to include #1816 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index de04dc62d415e..866e0571f30ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ IMPROVEMENTS: * `azurerm_iothub` - exporting the `event_hub_events_endpoint`, `event_hub_events_path`, `event_hub_operations_endpoint` and `event_hub_operations_path` fields [GH-1789] * `azurerm_iothub` - support for `endpoint` and `route` blocks [GH-1693] +* `azurerm_storage_blob` - support for import [GH-1816] +* `azurerm_storage_container` - support for import [GH-1816] +* `azurerm_storage_queue` - support for import [GH-1816] +* `azurerm_storage_table` - support for import [GH-1816] BUG FIXES: From 244f1d7a99c8169660926042709b3b4ed049c2d4 Mon Sep 17 00:00:00 2001 From: Su Shi <1684739+metacpp@users.noreply.github.com> Date: Thu, 30 Aug 2018 05:43:18 -0700 Subject: [PATCH 06/13] linux_profile should be optional to align with API spec (#1821) `azurerm_kubernetes_cluster` - making `linux_profile` optional --- azurerm/import_arm_kubernetes_cluster_test.go | 25 ++++++ azurerm/resource_arm_kubernetes_cluster.go | 50 +++++++---- .../resource_arm_kubernetes_cluster_test.go | 84 ++++++++++++++----- .../docs/r/kubernetes_cluster.html.markdown | 10 +-- 4 files changed, 124 insertions(+), 45 deletions(-) diff --git a/azurerm/import_arm_kubernetes_cluster_test.go b/azurerm/import_arm_kubernetes_cluster_test.go index a90fa889a9d4f..c1e8c6776da07 100644 --- a/azurerm/import_arm_kubernetes_cluster_test.go +++ b/azurerm/import_arm_kubernetes_cluster_test.go @@ -32,3 +32,28 @@ func TestAccAzureRMKubernetesCluster_importBasic(t *testing.T) { }, }) } + +func TestAccAzureRMKubernetesCluster_importLinuxProfile(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + + ri := acctest.RandInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 2e245b57d09cf..125903ef607a6 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -135,7 +135,7 @@ func resourceArmKubernetesCluster() *schema.Resource { "linux_profile": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -403,7 +403,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} AgentPoolProfiles: &agentProfiles, DNSPrefix: &dnsPrefix, KubernetesVersion: &kubernetesVersion, - LinuxProfile: &linuxProfile, + LinuxProfile: linuxProfile, ServicePrincipalProfile: servicePrincipalProfile, NetworkProfile: networkProfile, }, @@ -532,33 +532,40 @@ func resourceArmKubernetesClusterDelete(d *schema.ResourceData, meta interface{} return 
future.WaitForCompletionRef(ctx, kubernetesClustersClient.Client) } -func flattenAzureRmKubernetesClusterLinuxProfile(input *containerservice.LinuxProfile) []interface{} { +func flattenAzureRmKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile) []interface{} { + if profile == nil { + return []interface{}{} + } + values := make(map[string]interface{}) sshKeys := make([]interface{}, 0) - if profile := input; profile != nil { - if username := profile.AdminUsername; username != nil { - values["admin_username"] = *username - } + if username := profile.AdminUsername; username != nil { + values["admin_username"] = *username + } - if ssh := profile.SSH; ssh != nil { - if keys := ssh.PublicKeys; keys != nil { - for _, sshKey := range *keys { - outputs := make(map[string]interface{}, 0) - if keyData := sshKey.KeyData; keyData != nil { - outputs["key_data"] = *keyData - } - sshKeys = append(sshKeys, outputs) + if ssh := profile.SSH; ssh != nil { + if keys := ssh.PublicKeys; keys != nil { + for _, sshKey := range *keys { + outputs := make(map[string]interface{}, 0) + if keyData := sshKey.KeyData; keyData != nil { + outputs["key_data"] = *keyData } + sshKeys = append(sshKeys, outputs) } } } + values["ssh_key"] = sshKeys return []interface{}{values} } func flattenAzureRmKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.ManagedClusterAgentPoolProfile, fqdn *string) []interface{} { + if profiles == nil { + return []interface{}{} + } + agentPoolProfiles := make([]interface{}, 0) for _, profile := range *profiles { @@ -650,6 +657,10 @@ func flattenAzureRmKubernetesClusterAccessProfile(profile *containerservice.Mana } func flattenAzureRmKubernetesClusterNetworkProfile(profile *containerservice.NetworkProfile) []interface{} { + if profile == nil { + return []interface{}{} + } + values := make(map[string]interface{}) values["network_plugin"] = profile.NetworkPlugin @@ -690,8 +701,13 @@ func flattenKubernetesClusterKubeConfig(config kubernetes.KubeConfig) []interfac return []interface{}{values} } -func expandAzureRmKubernetesClusterLinuxProfile(d *schema.ResourceData) containerservice.LinuxProfile { +func expandAzureRmKubernetesClusterLinuxProfile(d *schema.ResourceData) *containerservice.LinuxProfile { profiles := d.Get("linux_profile").([]interface{}) + + if len(profiles) == 0 { + return nil + } + config := profiles[0].(map[string]interface{}) adminUsername := config["admin_username"].(string) @@ -714,7 +730,7 @@ func expandAzureRmKubernetesClusterLinuxProfile(d *schema.ResourceData) containe }, } - return profile + return &profile } func expandAzureRmKubernetesClusterServicePrincipal(d *schema.ResourceData) *containerservice.ServicePrincipalProfile { diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index f40e238082056..86fde1c0837aa 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -98,6 +98,36 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), + ), + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := acctest.RandInt() @@ -150,7 +180,7 @@ func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { Config: upgradeConfig, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.8.1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.11.2"), ), }, }, @@ -336,7 +366,34 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" + kubernetes_version = "1.10.7" + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_linuxProfile(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" linux_profile { admin_username = "acctestuser%d" @@ -372,15 +429,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } + kubernetes_version = "1.10.7" agent_pool_profile { name = "default" @@ -393,7 +442,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "%s" } } -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string { @@ -426,7 +475,6 @@ resource 
"azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" linux_profile { admin_username = "acctestuser%d" @@ -471,7 +519,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" + linux_profile { admin_username = "acctestuser%d" ssh_key { @@ -489,7 +537,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_id = "%s" client_secret = "%s" } - + addon_profile { oms_agent { enabled = true @@ -512,7 +560,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" + linux_profile { admin_username = "acctestuser%d" ssh_key { @@ -552,7 +600,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.8.1" + kubernetes_version = "1.11.2" linux_profile { admin_username = "acctestuser%d" @@ -606,7 +654,6 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" linux_profile { admin_username = "acctestuser%d" @@ -665,7 +712,6 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" linux_profile { admin_username = "acctestuser%d" diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 49113a917c713..c34215d04c170 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -27,14 +27,6 @@ resource "azurerm_kubernetes_cluster" "test" { resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix   = "acctestagent1" - linux_profile { - admin_username = "acctestuser1" - - ssh_key { - key_data = "ssh-rsa ..." - } - } - agent_pool_profile { name = "default" count = 1 @@ -180,7 +172,7 @@ The following arguments are supported: * `dns_prefix` - (Required) DNS prefix specified when creating the managed cluster. -* `linux_profile` - (Required) A Linux Profile block as documented below. +* `linux_profile` - (Optional) A Linux Profile block as documented below. * `agent_pool_profile` - (Required) One or more Agent Pool Profile's block as documented below. 
From 6ab46f8b0474b5825f26927a3aea2558f69782fc Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 13:43:39 +0100 Subject: [PATCH 07/13] Updating to include #1821 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 866e0571f30ab..05dc7bfedd1bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ IMPROVEMENTS: * `azurerm_iothub` - exporting the `event_hub_events_endpoint`, `event_hub_events_path`, `event_hub_operations_endpoint` and `event_hub_operations_path` fields [GH-1789] * `azurerm_iothub` - support for `endpoint` and `route` blocks [GH-1693] +* `azurerm_kubernetes_cluster` - making `linux_profile` optional [GH-1821] * `azurerm_storage_blob` - support for import [GH-1816] * `azurerm_storage_container` - support for import [GH-1816] * `azurerm_storage_queue` - support for import [GH-1816] From 032fbe66284dbcddcdadd4570381dfb6d190d8e7 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 14:57:13 +0100 Subject: [PATCH 08/13] r/Logic App: ensuring parameters are strings prior to setting (#1843) --- azurerm/resource_arm_logic_app_workflow.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/azurerm/resource_arm_logic_app_workflow.go b/azurerm/resource_arm_logic_app_workflow.go index d25c95549b59f..0f7bd038dad4f 100644 --- a/azurerm/resource_arm_logic_app_workflow.go +++ b/azurerm/resource_arm_logic_app_workflow.go @@ -256,7 +256,13 @@ func flattenLogicAppWorkflowParameters(input map[string]*logic.WorkflowParameter for k, v := range input { if v != nil { - output[k] = v.Value.(string) + // we only support string parameters at this time + val, ok := v.Value.(string) + if !ok { + log.Printf("[DEBUG] Skipping parameter %q since it's not a string", k) + } + + output[k] = val } } From f6e825ce5babfd977314ce8a6f2a28a714395ad4 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 14:57:47 +0100 Subject: [PATCH 09/13] Updating to include #1843 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 05dc7bfedd1bb..8a3e8d1249c36 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ BUG FIXES: * `azurerm_eventhub_namespace` - updating the validation error [GH-1795] * `azurerm_function_app` - support for names in upper-case [GH-1835] * `azurerm_kubernetes_cluster` - removing validation for the `pod_cidr` field when `network_plugin` is set to `azure` [GH-1798] +* `azurerm_logic_app_workflow` - ensuring parameters are strings [GH-1843] * `azurerm_virtual_machine` - setting the `image_uri` property within the `storage_os_disk` block [GH-1799] ## 1.13.0 (August 15, 2018) From 346fa814097bf116038433f4686cade7f499f916 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 15:57:39 +0100 Subject: [PATCH 10/13] Authentication: registering all clients consistently (#1845) --- azurerm/config.go | 124 ++++++++++------------------------------------ 1 file changed, 25 insertions(+), 99 deletions(-) diff --git a/azurerm/config.go b/azurerm/config.go index 2461f7cbf67f8..2a382a35c3303 100644 --- a/azurerm/config.go +++ b/azurerm/config.go @@ -444,70 +444,43 @@ func getArmClient(c *authentication.Config) (*ArmClient, error) { func (c *ArmClient) registerAppInsightsClients(endpoint, subscriptionId string, auth autorest.Authorizer, sender autorest.Sender) { ai := appinsights.NewComponentsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&ai.Client) - ai.Authorizer = auth - ai.Sender = sender - ai.SkipResourceProviderRegistration = 
c.skipProviderRegistration + c.configureClient(&ai.Client, auth) c.appInsightsClient = ai } func (c *ArmClient) registerAutomationClients(endpoint, subscriptionId string, auth autorest.Authorizer, sender autorest.Sender) { accountClient := automation.NewAccountClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&accountClient.Client) - accountClient.Authorizer = auth - accountClient.Sender = sender - accountClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&accountClient.Client, auth) c.automationAccountClient = accountClient credentialClient := automation.NewCredentialClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&credentialClient.Client) - credentialClient.Authorizer = auth - credentialClient.Sender = sender - credentialClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&credentialClient.Client, auth) c.automationCredentialClient = credentialClient runbookClient := automation.NewRunbookClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&runbookClient.Client) - runbookClient.Authorizer = auth - runbookClient.Sender = sender - runbookClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&runbookClient.Client, auth) c.automationRunbookClient = runbookClient scheduleClient := automation.NewScheduleClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&scheduleClient.Client) - scheduleClient.Authorizer = auth - scheduleClient.Sender = sender - scheduleClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&scheduleClient.Client, auth) c.automationScheduleClient = scheduleClient } func (c *ArmClient) registerAuthentication(endpoint, graphEndpoint, subscriptionId, tenantId string, auth, graphAuth autorest.Authorizer, sender autorest.Sender) { assignmentsClient := authorization.NewRoleAssignmentsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&assignmentsClient.Client) - assignmentsClient.Authorizer = auth - assignmentsClient.Sender = sender - assignmentsClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&assignmentsClient.Client, auth) c.roleAssignmentsClient = assignmentsClient definitionsClient := authorization.NewRoleDefinitionsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&definitionsClient.Client) - definitionsClient.Authorizer = auth - definitionsClient.Sender = sender - definitionsClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&definitionsClient.Client, auth) c.roleDefinitionsClient = definitionsClient applicationsClient := graphrbac.NewApplicationsClientWithBaseURI(graphEndpoint, tenantId) - setUserAgent(&applicationsClient.Client) - applicationsClient.Authorizer = graphAuth - applicationsClient.Sender = sender - applicationsClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&applicationsClient.Client, graphAuth) c.applicationsClient = applicationsClient servicePrincipalsClient := graphrbac.NewServicePrincipalsClientWithBaseURI(graphEndpoint, tenantId) - setUserAgent(&servicePrincipalsClient.Client) - servicePrincipalsClient.Authorizer = graphAuth - servicePrincipalsClient.Sender = sender - servicePrincipalsClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&servicePrincipalsClient.Client, graphAuth) c.servicePrincipalsClient = servicePrincipalsClient } @@ -600,31 +573,19 @@ func (c *ArmClient) 
registerContainerServicesClients(endpoint, subscriptionId st func (c *ArmClient) registerDatabases(endpoint, subscriptionId string, auth autorest.Authorizer, sender autorest.Sender) { // MySQL mysqlConfigClient := mysql.NewConfigurationsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&mysqlConfigClient.Client) - mysqlConfigClient.Authorizer = auth - mysqlConfigClient.Sender = sender - mysqlConfigClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&mysqlConfigClient.Client, auth) c.mysqlConfigurationsClient = mysqlConfigClient mysqlDBClient := mysql.NewDatabasesClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&mysqlDBClient.Client) - mysqlDBClient.Authorizer = auth - mysqlDBClient.Sender = sender - mysqlDBClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&mysqlDBClient.Client, auth) c.mysqlDatabasesClient = mysqlDBClient mysqlFWClient := mysql.NewFirewallRulesClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&mysqlFWClient.Client) - mysqlFWClient.Authorizer = auth - mysqlFWClient.Sender = sender - mysqlFWClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&mysqlFWClient.Client, auth) c.mysqlFirewallRulesClient = mysqlFWClient mysqlServersClient := mysql.NewServersClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&mysqlServersClient.Client) - mysqlServersClient.Authorizer = auth - mysqlServersClient.Sender = sender - mysqlServersClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&mysqlServersClient.Client, auth) c.mysqlServersClient = mysqlServersClient // PostgreSQL @@ -646,38 +607,23 @@ func (c *ArmClient) registerDatabases(endpoint, subscriptionId string, auth auto // SQL Azure sqlDBClient := sql.NewDatabasesClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&sqlDBClient.Client) - sqlDBClient.Authorizer = auth - sqlDBClient.Sender = sender - sqlDBClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&sqlDBClient.Client, auth) c.sqlDatabasesClient = sqlDBClient sqlFWClient := sql.NewFirewallRulesClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&sqlFWClient.Client) - sqlFWClient.Authorizer = auth - sqlFWClient.Sender = sender - sqlFWClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&sqlFWClient.Client, auth) c.sqlFirewallRulesClient = sqlFWClient sqlEPClient := sql.NewElasticPoolsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&sqlEPClient.Client) - sqlEPClient.Authorizer = auth - sqlEPClient.Sender = sender - sqlEPClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&sqlEPClient.Client, auth) c.sqlElasticPoolsClient = sqlEPClient sqlSrvClient := sql.NewServersClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&sqlSrvClient.Client) - sqlSrvClient.Authorizer = auth - sqlSrvClient.Sender = sender - sqlSrvClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&sqlSrvClient.Client, auth) c.sqlServersClient = sqlSrvClient sqlADClient := sql.NewServerAzureADAdministratorsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&sqlADClient.Client) - sqlADClient.Authorizer = auth - sqlADClient.Sender = sender - sqlADClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&sqlADClient.Client, auth) c.sqlServerAzureADAdministratorsClient = sqlADClient sqlVNRClient := 
sql.NewVirtualNetworkRulesClientWithBaseURI(endpoint, subscriptionId) @@ -725,49 +671,31 @@ func (c *ArmClient) registerDNSClients(endpoint, subscriptionId string, auth aut func (c *ArmClient) registerEventGridClients(endpoint, subscriptionId string, auth autorest.Authorizer, sender autorest.Sender) { egtc := eventgrid.NewTopicsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&egtc.Client) - egtc.Authorizer = auth - egtc.Sender = sender - egtc.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&egtc.Client, auth) c.eventGridTopicsClient = egtc } func (c *ArmClient) registerEventHubClients(endpoint, subscriptionId string, auth autorest.Authorizer, sender autorest.Sender) { ehc := eventhub.NewEventHubsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&ehc.Client) - ehc.Authorizer = auth - ehc.Sender = sender - ehc.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&ehc.Client, auth) c.eventHubClient = ehc chcgc := eventhub.NewConsumerGroupsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&chcgc.Client) - chcgc.Authorizer = auth - chcgc.Sender = sender - chcgc.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&chcgc.Client, auth) c.eventHubConsumerGroupClient = chcgc ehnc := eventhub.NewNamespacesClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&ehnc.Client) - ehnc.Authorizer = auth - ehnc.Sender = sender - ehnc.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&ehnc.Client, auth) c.eventHubNamespacesClient = ehnc } func (c *ArmClient) registerKeyVaultClients(endpoint, subscriptionId string, auth autorest.Authorizer, keyVaultAuth autorest.Authorizer, sender autorest.Sender) { keyVaultClient := keyvault.NewVaultsClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&keyVaultClient.Client) - keyVaultClient.Authorizer = auth - keyVaultClient.Sender = sender - keyVaultClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&keyVaultClient.Client, auth) c.keyVaultClient = keyVaultClient keyVaultManagementClient := keyVault.New() - setUserAgent(&keyVaultManagementClient.Client) - keyVaultManagementClient.Authorizer = keyVaultAuth - keyVaultManagementClient.Sender = sender - keyVaultManagementClient.SkipResourceProviderRegistration = c.skipProviderRegistration + c.configureClient(&keyVaultManagementClient.Client, keyVaultAuth) c.keyVaultManagementClient = keyVaultManagementClient } @@ -783,9 +711,7 @@ func (c *ArmClient) registerMonitorClients(endpoint, subscriptionId string, auth c.actionGroupsClient = actionGroupsClient arc := insights.NewAlertRulesClientWithBaseURI(endpoint, subscriptionId) - setUserAgent(&arc.Client) - arc.Authorizer = auth - arc.Sender = autorest.CreateSender(withRequestLogging()) + c.configureClient(&arc.Client, auth) c.monitorAlertRulesClient = arc autoscaleSettingsClient := insights.NewAutoscaleSettingsClientWithBaseURI(endpoint, subscriptionId) From 1a72110fb44e2773fa6abf173f50c5908bd9a83f Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Thu, 30 Aug 2018 15:58:34 +0100 Subject: [PATCH 11/13] Updating to include #1845 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a3e8d1249c36..1c1ab7509c956 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ IMPROVEMENTS: +* authentication: making the client registration consistent [GH-1845] * `azurerm_iothub` - exporting the `event_hub_events_endpoint`, `event_hub_events_path`, 
`event_hub_operations_endpoint` and `event_hub_operations_path` fields [GH-1789] * `azurerm_iothub` - support for `endpoint` and `route` blocks [GH-1693] * `azurerm_kubernetes_cluster` - making `linux_profile` optional [GH-1821] From 8b054071229f218960d43e770d73b0aa2269d7f5 Mon Sep 17 00:00:00 2001 From: Tim Curless Date: Fri, 31 Aug 2018 13:33:55 -0500 Subject: [PATCH 12/13] Adding documentation updates --- .../docs/d/kubernetes_cluster.html.markdown | 16 ++++++ .../docs/r/kubernetes_cluster.html.markdown | 54 +++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 354a4a6e436f0..a6bf4b02daf25 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -39,6 +39,8 @@ The following attributes are exported: * `fqdn` - The FQDN of the Azure Kubernetes Managed Cluster. +* `enable_rbac` - Whether Role Based Access Control is currently enabled. + * `kube_config_raw` - Base64 encoded Kubernetes configuration. * `node_resource_group` - Auto-generated Resource Group containing AKS Cluster resources. @@ -61,6 +63,8 @@ The following attributes are exported: * `network_profile` - A `network_profile` block as documented below. +* `aad_profile` - If AzureAD integration with RBAC is in use, a `aad_profile` block as documented below. + * `tags` - A mapping of tags assigned to this resource. --- @@ -144,6 +148,18 @@ A `oms_agent` block exports the following: --- +A `aad_profile` block exports the following: + +* `server_app_id` - AzureAD Server Application ID. + +* `server_app_secret` - AzureAD Server Application Secret. + +* `client_id` - AzureAD Client Application ID. + +* `tenant_id` - AzureAD Tenant ID. + +--- + A `service_principal` block supports the following: * `client_id` - The Client ID of the Service Principal used by this Managed Kubernetes Cluster. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index c34215d04c170..3e0496021e74c 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -178,6 +178,8 @@ The following arguments are supported: * `service_principal` - (Required) A Service Principal block as documented below. +* `enable_rbac` - (Optional) True or False. Enables or Disables Kubernetes Role Based Access Control (RBAC). Defaults to True. Changing this forces a new resource to be created. + --- * `addon_profile` - (Optional) A `addon_profile` block. @@ -187,6 +189,8 @@ The following arguments are supported: * `network_profile` - (Optional) A Network Profile block as documented below. -> **NOTE:** If `network_profile` is not defined, `kubenet` profile will be used by default. +* `aad_profile` - (Optional) An `aad_profile` block. Used to integrate AzureAD with RBAC. `enable_rbac` must be set to true. + * `tags` - (Optional) A mapping of tags to assign to the resource. --- @@ -284,6 +288,38 @@ resource "azurerm_kubernetes_cluster" "test" { [**Find out more about AKS Advanced Networking**](https://docs.microsoft.com/en-us/azure/aks/networking-overview#advanced-networking) +--- + +A `aad_profile` block supports the following: + +* `server_app_id` - (Required) AzureAD Server Application ID. + +* `server_app_secret` - (Required) AzureAD Server Application Secret. + +* `client_id` - (Required) AzureAD Client Application ID. + +* `tenant_id` - (Required) AzureAD Tenant ID. 
+ + +Here's an example of configuring an AzureAD RBAC Handler: + +``` +resource "azurerm_kubernetes_cluster" "test" { + # ... + + enable_rbac = true + + aad_profile { + client_id = "..." + server_app_id = "..." + server_app_secret = "..." + tenant_id = "..." + } +} +``` + +[**Find out more about AKS RBAC using AzureAD**](https://docs.microsoft.com/en-us/azure/aks/aad-integration) + ## Attributes Reference The following attributes are exported: @@ -294,6 +330,8 @@ The following attributes are exported: * `node_resource_group` - Auto-generated Resource Group containing AKS Cluster resources. +* `enable_rbac` - Whether Role Based Access Control is currently enabled. + * `kube_config_raw` - Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools @@ -302,6 +340,8 @@ The following attributes are exported: * `kube_config` - A `kube_config` block as defined below. +* `aad_profile` - If AzureAD integration with RBAC is in use a `aad_profile` block as defined below. + --- A `http_application_routing` block exports the following: @@ -338,6 +378,20 @@ provider "kubernetes" { } ``` + +--- + +A `aap_profile` block exports the following: + +* `server_app_id` - AzureAD Server Application ID. + +* `server_app_secret` - AzureAD Server Application Secret. + +* `client_id` - AzureAD Client Application ID. + +* `tenant_id` - AzureAD Tenant ID. + + ## Import Kubernetes Clusters can be imported using the `resource id`, e.g. From ef80ef0f528cf4d3e81997a912bbd51dd5e28c4f Mon Sep 17 00:00:00 2001 From: Tim Curless Date: Sat, 22 Sep 2018 22:01:26 -0500 Subject: [PATCH 13/13] Fixing issues with aadProfile server_app_secret always causing a new cluster --- azurerm/data_source_kubernetes_cluster.go | 10 ---- .../data_source_kubernetes_cluster_test.go | 44 +++++++++++++++ azurerm/helpers/kubernetes/kube_config.go | 22 ++++---- azurerm/resource_arm_kubernetes_cluster.go | 55 +++++++++++++------ .../resource_arm_kubernetes_cluster_test.go | 4 +- .../docs/d/kubernetes_cluster.html.markdown | 2 - .../docs/r/kubernetes_cluster.html.markdown | 12 ---- 7 files changed, 95 insertions(+), 54 deletions(-) diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index 266e53b602fd8..e7191490c562d 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -186,12 +186,6 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, }, - "server_app_secret": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - "client_app_id": { Type: schema.TypeString, Computed: true, @@ -485,10 +479,6 @@ func flattenKubernetesClusterDataSourceAadProfile(profile *containerservice.Mana values["server_app_id"] = *serverAppId } - if serverAppSecret := profile.ServerAppSecret; serverAppSecret != nil { - values["server_app_secret"] = *serverAppSecret - } - if clientAppId := profile.ClientAppID; clientAppId != nil { values["client_app_id"] = *clientAppId } diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index 7380984a9d90f..c7e91ad51d20e 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -38,6 +38,38 @@ func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { }) } +func TestAccDataSourceAzureRMKubernetesCluster_aadProfile(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + 
clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + serverAppId := os.Getenv("ARM_SERVER_APP_ID") + serverAppSecret := os.Getenv("ARM_SERVER_APP_SECRET") + clientAppId := os.Getenv("ARM_CLIENT_APP_ID") + tenantId := os.Getenv("ARM_TENANT_ID") + location := testLocation() + config := testAccDataSourceAzureRMKubernetesCluster_rbacAAD(ri, clientId, clientSecret, location, serverAppId, serverAppSecret, clientAppId, tenantId) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttr(dataSourceName, "aad_profile.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceName, "aad_profile.0.server_app_id"), + resource.TestCheckResourceAttrSet(dataSourceName, "aad_profile.0.server_app_secret"), + resource.TestCheckResourceAttrSet(dataSourceName, "aad_profile.0.client_app_id"), + resource.TestCheckResourceAttrSet(dataSourceName, "aad_profile.0.tenant_id"), + ), + }, + }, + }) +} + func TestAccDataSourceAzureRMKubernetesCluster_internalNetwork(t *testing.T) { dataSourceName := "data.azurerm_kubernetes_cluster.test" ri := acctest.RandInt() @@ -244,6 +276,18 @@ data "azurerm_kubernetes_cluster" "test" { `, resource) } +func testAccDataSourceAzureRMKubernetesCluster_rbacAAD(rInt int, clientId string, clientSecret string, location string, serverAppId string, serverAppSecret string, clientAppId string, tenantId string) string { + resource := testAccAzureRMKubernetesCluster_rbacAAD(rInt, clientId, clientSecret, location, serverAppId, serverAppSecret, clientAppId, tenantId) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, resource) +} + func testAccDataSourceAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string { resource := testAccAzureRMKubernetesCluster_internalNetwork(rInt, clientId, clientSecret, location) return fmt.Sprintf(` diff --git a/azurerm/helpers/kubernetes/kube_config.go b/azurerm/helpers/kubernetes/kube_config.go index 5883c1dc27979..41eb924981312 100644 --- a/azurerm/helpers/kubernetes/kube_config.go +++ b/azurerm/helpers/kubernetes/kube_config.go @@ -27,21 +27,21 @@ type user struct { ClientKeyData string `yaml:"client-key-data"` } -type userItemRBAC struct { - Name string `yaml:"name"` - User userRBAC `yaml:"user"` +type userItemAAD struct { + Name string `yaml:"name"` + User userAAD `yaml:"user"` } -type userRBAC struct { +type userAAD struct { AuthProvider authProvider `yaml:"auth-provider"` } type authProvider struct { - Name string `yaml:"name"` - Config configRBACAzureAD `yaml:"config"` + Name string `yaml:"name"` + Config configAzureAD `yaml:"config"` } -type configRBACAzureAD struct { +type configAzureAD struct { APIServerID string `yaml:"apiserver-id,omitempty"` ClientID string `yaml:"client-id,omitempty"` TenantID string `yaml:"tenant-id,omitempty"` @@ -68,10 +68,10 @@ type KubeConfig struct { Preferences map[string]interface{} `yaml:"preferences,omitempty"` } -type KubeConfigRBAC struct { +type KubeConfigAAD struct { APIVersion string `yaml:"apiVersion"` Clusters []clusterItem `yaml:"clusters"` - Users []userItemRBAC `yaml:"users"` 
+ Users []userItemAAD `yaml:"users"` Contexts []contextItem `yaml:"contexts,omitempty"` CurrentContext string `yaml:"current-context,omitempty"` Kind string `yaml:"kind,omitempty"` @@ -103,12 +103,12 @@ func ParseKubeConfig(config string) (*KubeConfig, error) { return &kubeConfig, nil } -func ParseKubeConfigRBAC(config string) (*KubeConfigRBAC, error) { +func ParseKubeConfigAAD(config string) (*KubeConfigAAD, error) { if config == "" { return nil, fmt.Errorf("Cannot parse empty config") } - var kubeConfig KubeConfigRBAC + var kubeConfig KubeConfigAAD err := yaml.Unmarshal([]byte(config), &kubeConfig) if err != nil { return nil, fmt.Errorf("Failed to unmarshal YAML config with error %+v", err) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 4a0ab1ab2962c..3ff71ad41cb0b 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -266,20 +266,20 @@ func resourceArmKubernetesCluster() *schema.Resource { }, "aad_profile": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, - ForceNew: true, MaxItems: 1, - // TODO: Validation Function Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "server_app_id": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "server_app_secret": { Type: schema.TypeString, + ForceNew: true, Required: true, Sensitive: true, }, @@ -287,14 +287,17 @@ func resourceArmKubernetesCluster() *schema.Resource { "client_app_id": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "tenant_id": { Type: schema.TypeString, Required: true, + ForceNew: true, }, }, }, + Set: resourceAzureRMKubernetesClusterAadProfileHash, }, "network_profile": { @@ -549,8 +552,9 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) } } - if d.Get("enable_rbac") == true { - kubeConfigRaw, kubeConfig := flattenAzureRmKubernetesClusterAccessProfileRBAC(&profile) + serverAppId, ok := d.GetOkExists("server_app_id") + if ok && serverAppId != "" { + kubeConfigRaw, kubeConfig := flattenAzureRmKubernetesClusterAccessProfileAAD(&profile) d.Set("kube_config_raw", kubeConfigRaw) if err := d.Set("kube_config", kubeConfig); err != nil { return fmt.Errorf("Error setting `kube_config`: %+v", err) @@ -693,11 +697,15 @@ func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerse return servicePrincipalProfiles } -func flattenAzureRmKubernetesClusterAadProfile(profile *containerservice.ManagedClusterAADProfile) []interface{} { +func flattenAzureRmKubernetesClusterAadProfile(profile *containerservice.ManagedClusterAADProfile) *schema.Set { if profile == nil { return nil } + aadProfiles := &schema.Set{ + F: resourceAzureRMKubernetesClusterAadProfileHash, + } + values := make(map[string]interface{}) if serverAppId := profile.ServerAppID; serverAppId != nil { @@ -716,7 +724,9 @@ func flattenAzureRmKubernetesClusterAadProfile(profile *containerservice.Managed values["tenant_id"] = *tenantId } - return []interface{}{values} + aadProfiles.Add(values) + + return aadProfiles } func flattenAzureRmKubernetesClusterAccessProfile(profile *containerservice.ManagedClusterAccessProfile) (*string, []interface{}) { @@ -738,18 +748,18 @@ func flattenAzureRmKubernetesClusterAccessProfile(profile *containerservice.Mana return nil, []interface{}{} } -func flattenAzureRmKubernetesClusterAccessProfileRBAC(profile *containerservice.ManagedClusterAccessProfile) (*string, []interface{}) { +func 
flattenAzureRmKubernetesClusterAccessProfileAAD(profile *containerservice.ManagedClusterAccessProfile) (*string, []interface{}) { if profile != nil { if accessProfile := profile.AccessProfile; accessProfile != nil { if kubeConfigRaw := accessProfile.KubeConfig; kubeConfigRaw != nil { rawConfig := string(*kubeConfigRaw) - kubeConfigRBAC, err := kubernetes.ParseKubeConfigRBAC(rawConfig) + kubeConfigAAD, err := kubernetes.ParseKubeConfigAAD(rawConfig) if err != nil { return utils.String(rawConfig), []interface{}{} } - flattenedKubeConfig := flattenKubernetesClusterKubeConfigRBAC(*kubeConfigRBAC) + flattenedKubeConfig := flattenKubernetesClusterKubeConfigAAD(*kubeConfigAAD) return utils.String(rawConfig), flattenedKubeConfig } } @@ -802,7 +812,7 @@ func flattenKubernetesClusterKubeConfig(config kubernetes.KubeConfig) []interfac return []interface{}{values} } -func flattenKubernetesClusterKubeConfigRBAC(config kubernetes.KubeConfigRBAC) []interface{} { +func flattenKubernetesClusterKubeConfigAAD(config kubernetes.KubeConfigAAD) []interface{} { values := make(map[string]interface{}) cluster := config.Clusters[0].Cluster @@ -915,13 +925,14 @@ func expandAzureRmKubernetesClusterAadProfile(d *schema.ResourceData) *container return nil } - profiles := value.([]interface{}) - profile := profiles[0].(map[string]interface{}) + configs := value.(*schema.Set).List() + + config := configs[0].(map[string]interface{}) - serverAppId := profile["server_app_id"].(string) - serverAppSecret := profile["server_app_secret"].(string) - clientAppId := profile["client_app_id"].(string) - tenantId := profile["tenant_id"].(string) + serverAppId := config["server_app_id"].(string) + serverAppSecret := config["server_app_secret"].(string) + clientAppId := config["client_app_id"].(string) + tenantId := config["tenant_id"].(string) aadProfile := containerservice.ManagedClusterAADProfile{ ServerAppID: &serverAppId, @@ -1063,6 +1074,16 @@ func resourceAzureRMKubernetesClusterServicePrincipalProfileHash(v interface{}) return hashcode.String(buf.String()) } +func resourceAzureRMKubernetesClusterAadProfileHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%s-", m["server_app_id"].(string))) + } + + return hashcode.String(buf.String()) +} + func validateKubernetesClusterAgentPoolName() schema.SchemaValidateFunc { return validation.StringMatch( regexp.MustCompile("^[a-z]{1}[a-z0-9]{0,11}$"), diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 525040867a5bd..2bbfb7992751b 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -118,7 +118,7 @@ func TestAccAzureRMKubernetesCluster_aadProfile(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "aad_profile.serverAppId"), + resource.TestCheckResourceAttr(resourceName, "aad_profile.#", "1"), ), }, }, @@ -484,7 +484,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.7.7" + kubernetes_version = "1.10.7" enable_rbac = true linux_profile { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index a6bf4b02daf25..5c5bcdd1ea100 100644 --- 
a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -152,8 +152,6 @@ A `aad_profile` block exports the following: * `server_app_id` - AzureAD Server Application ID. -* `server_app_secret` - AzureAD Server Application Secret. - * `client_id` - AzureAD Client Application ID. * `tenant_id` - AzureAD Tenant ID. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 3e0496021e74c..5ca70b2b62183 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -340,8 +340,6 @@ The following attributes are exported: * `kube_config` - A `kube_config` block as defined below. -* `aad_profile` - If AzureAD integration with RBAC is in use a `aad_profile` block as defined below. - --- A `http_application_routing` block exports the following: @@ -381,16 +379,6 @@ provider "kubernetes" { --- -A `aap_profile` block exports the following: - -* `server_app_id` - AzureAD Server Application ID. - -* `server_app_secret` - AzureAD Server Application Secret. - -* `client_id` - AzureAD Client Application ID. - -* `tenant_id` - AzureAD Tenant ID. - ## Import
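
For reference, a minimal sketch of the `aad_profile` block this patch series introduces on `azurerm_kubernetes_cluster`, based on the schema in the diff above (all four arguments are `Required` and `ForceNew`, so changing any of them recreates the cluster). The GUIDs and secret below are placeholders, not values from the patch:

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  # ... name, location, resource_group_name, dns_prefix,
  # linux_profile, agent_pool_profile, service_principal, etc.

  enable_rbac = true

  # Azure Active Directory integration, as added by this change.
  aad_profile {
    server_app_id     = "00000000-0000-0000-0000-000000000000"
    server_app_secret = "<server-app-secret>"
    client_app_id     = "11111111-1111-1111-1111-111111111111"
    tenant_id         = "22222222-2222-2222-2222-222222222222"
  }
}
```

The matching computed `aad_profile` attributes on the `azurerm_kubernetes_cluster` data source are exercised by the `TestAccDataSourceAzureRMKubernetesCluster_rbacAAD` acceptance test near the top of this section.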