diff --git a/azurerm/internal/services/containers/nodepool_id.go b/azurerm/internal/services/containers/nodepool_id.go new file mode 100644 index 000000000000..9a51d5172e3a --- /dev/null +++ b/azurerm/internal/services/containers/nodepool_id.go @@ -0,0 +1,45 @@ +package containers + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type KubernetesNodePoolID struct { + Name string + ClusterName string + ResourceGroup string + + ID azure.ResourceID +} + +func ParseKubernetesNodePoolID(id string) (*KubernetesNodePoolID, error) { + clusterId, err := azure.ParseAzureResourceID(id) + if err != nil { + return nil, err + } + + resourceGroup := clusterId.ResourceGroup + if resourceGroup == "" { + return nil, fmt.Errorf("%q is missing a Resource Group", id) + } + + clusterName := clusterId.Path["managedClusters"] + if clusterName == "" { + return nil, fmt.Errorf("%q is missing the `managedClusters` segment", id) + } + + nodePoolName := clusterId.Path["agentPools"] + if nodePoolName == "" { + return nil, fmt.Errorf("%q is missing the `agentPools` segment", id) + } + + output := KubernetesNodePoolID{ + Name: nodePoolName, + ClusterName: clusterName, + ResourceGroup: resourceGroup, + ID: *clusterId, + } + return &output, nil +} diff --git a/azurerm/provider.go b/azurerm/provider.go index f8af16943280..d45c36b9a66a 100644 --- a/azurerm/provider.go +++ b/azurerm/provider.go @@ -313,6 +313,7 @@ func Provider() terraform.ResourceProvider { "azurerm_key_vault_secret": resourceArmKeyVaultSecret(), "azurerm_key_vault": resourceArmKeyVault(), "azurerm_kubernetes_cluster": resourceArmKubernetesCluster(), + "azurerm_kubernetes_cluster_node_pool": resourceArmKubernetesClusterNodePool(), "azurerm_kusto_cluster": resourceArmKustoCluster(), "azurerm_kusto_database": resourceArmKustoDatabase(), "azurerm_kusto_eventhub_data_connection": resourceArmKustoEventHubDataConnection(), diff --git a/azurerm/resource_arm_kubernetes_cluster_node_pool.go b/azurerm/resource_arm_kubernetes_cluster_node_pool.go new file mode 100644 index 000000000000..a9f55a54758b --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_node_pool.go @@ -0,0 +1,489 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmKubernetesClusterNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceArmKubernetesClusterNodePoolCreate, + Read: resourceArmKubernetesClusterNodePoolRead, + Update: resourceArmKubernetesClusterNodePoolUpdate, + Delete: resourceArmKubernetesClusterNodePoolDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.KubernetesAgentPoolName, + }, + + "kubernetes_cluster_id": containers.KubernetesClusterIDSchema(), + + "node_count": { + 
Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "vm_size": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + // Optional + "availability_zones": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "enable_auto_scaling": { + Type: schema.TypeBool, + Optional: true, + }, + + "enable_node_public_ip": { + Type: schema.TypeBool, + Optional: true, + }, + + "max_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "max_pods": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "min_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "node_taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "os_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + + "os_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: string(containerservice.Linux), + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.Linux), + string(containerservice.Windows), + }, false), + }, + + "vnet_subnet_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + } +} + +func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + clustersClient := meta.(*ArmClient).Containers.KubernetesClustersClient + poolsClient := meta.(*ArmClient).Containers.AgentPoolsClient + ctx := meta.(*ArmClient).StopContext + + kubernetesClusterId, err := containers.ParseKubernetesClusterID(d.Get("kubernetes_cluster_id").(string)) + if err != nil { + return err + } + + resourceGroup := kubernetesClusterId.ResourceGroup + clusterName := kubernetesClusterId.Name + name := d.Get("name").(string) + + log.Printf("[DEBUG] Retrieving Kubernetes Cluster %q (Resource Group %q)..", clusterName, resourceGroup) + cluster, err := clustersClient.Get(ctx, resourceGroup, clusterName) + if err != nil { + if utils.ResponseWasNotFound(cluster.Response) { + return fmt.Errorf("Kubernetes Cluster %q was not found in Resource Group %q!", clusterName, resourceGroup) + } + + return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", clusterName, resourceGroup, err) + } + + // try to provide a more helpful error here + defaultPoolIsVMSS := false + if props := cluster.ManagedClusterProperties; props != nil { + if pools := props.AgentPoolProfiles; pools != nil { + for _, p := range *pools { + if p.Type == containerservice.VirtualMachineScaleSets { + defaultPoolIsVMSS = true + break + } + } + } + } + if !defaultPoolIsVMSS { + return fmt.Errorf("The Default Node Pool for Kubernetes Cluster %q (Resource Group %q) must be a VirtualMachineScaleSet to attach multiple node pools!", clusterName, resourceGroup) + } + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := poolsClient.Get(ctx, resourceGroup, clusterName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Agent Pool %q (Kubernetes Cluster %q / Resource Group %q): %s", name, clusterName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != 
"" { + return tf.ImportAsExistsError("azurerm_kubernetes_cluster_node_pool", *existing.ID) + } + } + + count := d.Get("node_count").(int) + enableAutoScaling := d.Get("enable_auto_scaling").(bool) + osType := d.Get("os_type").(string) + vmSize := d.Get("vm_size").(string) + + profile := containerservice.ManagedClusterAgentPoolProfileProperties{ + OsType: containerservice.OSType(osType), + EnableAutoScaling: utils.Bool(enableAutoScaling), + EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)), + Type: containerservice.VirtualMachineScaleSets, + VMSize: containerservice.VMSizeTypes(vmSize), + + // this must always be sent during creation, but is optional for auto-scaled clusters during update + Count: utils.Int32(int32(count)), + } + + availabilityZonesRaw := d.Get("availability_zones").([]interface{}) + if availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw); len(*availabilityZones) > 0 { + profile.AvailabilityZones = availabilityZones + } + + if maxPods := int32(d.Get("max_pods").(int)); maxPods > 0 { + profile.MaxPods = utils.Int32(maxPods) + } + + nodeTaintsRaw := d.Get("node_taints").([]interface{}) + if nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw); len(*nodeTaints) > 0 { + profile.NodeTaints = nodeTaints + } + + if osDiskSizeGB := d.Get("os_disk_size_gb").(int); osDiskSizeGB > 0 { + profile.OsDiskSizeGB = utils.Int32(int32(osDiskSizeGB)) + } + + if vnetSubnetID := d.Get("vnet_subnet_id").(string); vnetSubnetID != "" { + profile.VnetSubnetID = utils.String(vnetSubnetID) + } + + maxCount := d.Get("max_count").(int) + minCount := d.Get("min_count").(int) + + if enableAutoScaling { + // handle count being optional + if count == 0 { + profile.Count = utils.Int32(int32(minCount)) + } + + if maxCount > 0 { + profile.MaxCount = utils.Int32(int32(maxCount)) + } else { + return fmt.Errorf("`max_count` must be configured when `enable_auto_scaling` is set to `true`") + } + + if minCount > 0 { + profile.MinCount = utils.Int32(int32(minCount)) + } else { + return fmt.Errorf("`min_count` must be configured when `enable_auto_scaling` is set to `true`") + } + + if minCount > maxCount { + return fmt.Errorf("`max_count` must be >= `min_count`") + } + } else if minCount > 0 || maxCount > 0 { + return fmt.Errorf("`max_count` and `min_count` must be set to `0` when enable_auto_scaling is set to `false`") + } + + parameters := containerservice.AgentPool{ + Name: &name, + ManagedClusterAgentPoolProfileProperties: &profile, + } + + future, err := poolsClient.CreateOrUpdate(ctx, resourceGroup, clusterName, name, parameters) + if err != nil { + return fmt.Errorf("Error creating/updating Managed Kubernetes Cluster Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, poolsClient.Client); err != nil { + return fmt.Errorf("Error waiting for completion of Managed Kubernetes Cluster Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + read, err := poolsClient.Get(ctx, resourceGroup, clusterName, name) + if err != nil { + return fmt.Errorf("Error retrieving Managed Kubernetes Cluster Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if read.ID == nil { + return fmt.Errorf("Cannot read ID for Managed Kubernetes Cluster Node Pool %q (Resource Group %q)", name, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmKubernetesClusterNodePoolRead(d, meta) +} + +func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + client := 
meta.(*ArmClient).Containers.AgentPoolsClient + ctx := meta.(*ArmClient).StopContext + + id, err := containers.ParseKubernetesNodePoolID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + clusterName := id.ClusterName + name := id.Name + + d.Partial(true) + + log.Printf("[DEBUG] Retrieving existing Node Pool %q (Kubernetes Cluster %q / Resource Group %q)..", name, clusterName, resourceGroup) + existing, err := client.Get(ctx, resourceGroup, clusterName, name) + if err != nil { + if utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("[DEBUG] Node Pool %q was not found in Managed Kubernetes Cluster %q / Resource Group %q!", name, clusterName, resourceGroup) + } + + return fmt.Errorf("Error retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", name, clusterName, resourceGroup, err) + } + if existing.ManagedClusterAgentPoolProfileProperties == nil { + return fmt.Errorf("Error retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): `properties` was nil", name, clusterName, resourceGroup) + } + + props := existing.ManagedClusterAgentPoolProfileProperties + + // store the existing value should the user have opted to ignore it + enableAutoScaling := false + if props.EnableAutoScaling != nil { + enableAutoScaling = *props.EnableAutoScaling + } + + log.Printf("[DEBUG] Determining delta for existing Node Pool %q (Kubernetes Cluster %q / Resource Group %q)..", name, clusterName, resourceGroup) + + // delta patching + if d.HasChange("availability_zones") { + availabilityZonesRaw := d.Get("availability_zones").([]interface{}) + availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw) + props.AvailabilityZones = availabilityZones + } + + if d.HasChange("enable_auto_scaling") { + enableAutoScaling = d.Get("enable_auto_scaling").(bool) + props.EnableAutoScaling = utils.Bool(enableAutoScaling) + } + + if d.HasChange("enable_node_public_ip") { + props.EnableNodePublicIP = utils.Bool(d.Get("enable_node_public_ip").(bool)) + } + + if d.HasChange("max_count") { + props.MaxCount = utils.Int32(int32(d.Get("max_count").(int))) + } + + if d.HasChange("min_count") { + props.MinCount = utils.Int32(int32(d.Get("min_count").(int))) + } + + if d.HasChange("node_count") { + props.Count = utils.Int32(int32(d.Get("node_count").(int))) + } + + if d.HasChange("node_taints") { + nodeTaintsRaw := d.Get("node_taints").([]interface{}) + nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw) + props.NodeTaints = nodeTaints + } + + // validate the auto-scale fields are both set/unset to prevent a continual diff + maxCount := 0 + if props.MaxCount != nil { + maxCount = int(*props.MaxCount) + } + minCount := 0 + if props.MinCount != nil { + minCount = int(*props.MinCount) + } + if enableAutoScaling { + if maxCount == 0 { + return fmt.Errorf("`max_count` must be configured when `enable_auto_scaling` is set to `true`") + } + if minCount == 0 { + return fmt.Errorf("`min_count` must be configured when `enable_auto_scaling` is set to `true`") + } + + if minCount > maxCount { + return fmt.Errorf("`max_count` must be >= `min_count`") + } + } else if minCount > 0 || maxCount > 0 { + return fmt.Errorf("`max_count` and `min_count` must be set to `0` when enable_auto_scaling is set to `false`") + } + + log.Printf("[DEBUG] Updating existing Node Pool %q (Kubernetes Cluster %q / Resource Group %q)..", name, clusterName, resourceGroup) + existing.ManagedClusterAgentPoolProfileProperties = props + future, err := client.CreateOrUpdate(ctx, 
resourceGroup, clusterName, name, existing) + if err != nil { + return fmt.Errorf("Error updating Node Pool %q (Kubernetes Cluster %q / Resource Group %q): %+v", name, clusterName, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for update of Node Pool %q (Kubernetes Cluster %q / Resource Group %q): %+v", name, clusterName, resourceGroup, err) + } + + d.Partial(false) + + return resourceArmKubernetesClusterNodePoolRead(d, meta) +} + +func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interface{}) error { + clustersClient := meta.(*ArmClient).Containers.KubernetesClustersClient + poolsClient := meta.(*ArmClient).Containers.AgentPoolsClient + ctx := meta.(*ArmClient).StopContext + + id, err := containers.ParseKubernetesNodePoolID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + clusterName := id.ClusterName + name := id.Name + + // if the parent cluster doesn't exist then the node pool won't + cluster, err := clustersClient.Get(ctx, resourceGroup, clusterName) + if err != nil { + if utils.ResponseWasNotFound(cluster.Response) { + log.Printf("[DEBUG] Managed Kubernetes Cluster %q was not found in Resource Group %q - removing from state!", clusterName, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", clusterName, resourceGroup, err) + } + + resp, err := poolsClient.Get(ctx, resourceGroup, clusterName, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] Node Pool %q was not found in Managed Kubernetes Cluster %q / Resource Group %q - removing from state!", name, clusterName, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", name, clusterName, resourceGroup, err) + } + + d.Set("name", name) + d.Set("kubernetes_cluster_id", cluster.ID) + + if props := resp.ManagedClusterAgentPoolProfileProperties; props != nil { + if err := d.Set("availability_zones", utils.FlattenStringSlice(props.AvailabilityZones)); err != nil { + return fmt.Errorf("Error setting `availability_zones`: %+v", err) + } + + d.Set("enable_auto_scaling", props.EnableAutoScaling) + d.Set("enable_node_public_ip", props.EnableNodePublicIP) + + maxCount := 0 + if props.MaxCount != nil { + maxCount = int(*props.MaxCount) + } + d.Set("max_count", maxCount) + + maxPods := 0 + if props.MaxPods != nil { + maxPods = int(*props.MaxPods) + } + d.Set("max_pods", maxPods) + + minCount := 0 + if props.MinCount != nil { + minCount = int(*props.MinCount) + } + d.Set("min_count", minCount) + + count := 0 + if props.Count != nil { + count = int(*props.Count) + } + d.Set("node_count", count) + + if err := d.Set("node_taints", utils.FlattenStringSlice(props.NodeTaints)); err != nil { + return fmt.Errorf("Error setting `node_taints`: %+v", err) + } + + osDiskSizeGB := 0 + if props.OsDiskSizeGB != nil { + osDiskSizeGB = int(*props.OsDiskSizeGB) + } + d.Set("os_disk_size_gb", osDiskSizeGB) + d.Set("os_type", string(props.OsType)) + d.Set("vnet_subnet_id", props.VnetSubnetID) + d.Set("vm_size", string(props.VMSize)) + } + + return nil +} + +func resourceArmKubernetesClusterNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).Containers.AgentPoolsClient + ctx := meta.(*ArmClient).StopContext + + id, err := 
containers.ParseKubernetesNodePoolID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.ClusterName, id.Name) + if err != nil { + return fmt.Errorf("Error deleting Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", id.Name, id.ClusterName, id.ResourceGroup, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for the deletion of Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", id.Name, id.ClusterName, id.ResourceGroup, err) + } + + return nil +} diff --git a/azurerm/resource_arm_kubernetes_cluster_node_pool_test.go b/azurerm/resource_arm_kubernetes_cluster_node_pool_test.go new file mode 100644 index 000000000000..f3a810a6c703 --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_node_pool_test.go @@ -0,0 +1,1168 @@ +package azurerm + +import ( + "fmt" + "net/http" + "os" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers" +) + +func TestAccAzureRMKubernetesClusterNodePool_autoScale(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + // Enabled + Config: testAccAzureRMKubernetesClusterNodePool_autoScale(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + // Disabled + Config: testAccAzureRMKubernetesClusterNodePool_manualScale(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + // Enabled + Config: testAccAzureRMKubernetesClusterNodePool_autoScale(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_autoScaleUpdate(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_autoScaleNodeCount(ri, clientId, clientSecret, location, 1, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMKubernetesClusterNodePool_autoScaleNodeCount(ri, clientId, clientSecret, location, 3, 5), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMKubernetesClusterNodePool_autoScaleNodeCount(ri, clientId, clientSecret, location, 1, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_availabilityZones(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_availabilityZones(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_errorForAvailabilitySet(t *testing.T) { + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_availabilitySet(ri, clientId, clientSecret, location), + ExpectError: regexp.MustCompile("must be a VirtualMachineScaleSet to attach multiple node pools"), + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_multiplePools(t *testing.T) { + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_multiplePools(ri, clientId, clientSecret, location, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.autoscale"), + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.manual"), + ), + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.autoscale", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.manual", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_manualScale(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := 
testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScale(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePools(t *testing.T) { + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePools(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.first"), + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.second"), + ), + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.first", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.second", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsUpdate(t *testing.T) { + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsNodeCount(ri, clientId, clientSecret, location, 1), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.first"), + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.second"), + ), + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.first", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.second", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsNodeCount(ri, clientId, clientSecret, location, 2), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.first"), + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.second"), + ), + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.first", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.second", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_manualScaleUpdate(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := 
testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleNodeCount(ri, clientId, clientSecret, location, 1), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + // up + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleNodeCount(ri, clientId, clientSecret, location, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + // and down + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleNodeCount(ri, clientId, clientSecret, location, 1), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(ri, clientId, clientSecret, location, "Standard_F2s_v2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(ri, clientId, clientSecret, location, "Standard_F4s_v2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_nodePublicIP(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_nodePublicIP(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_nodeTaints(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_nodeTaints(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_requiresImport(t *testing.T) { + if !features.ShouldResourcesBeImported() { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_manualScale(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + Config: testAccAzureRMKubernetesClusterNodePool_requiresImport(ri, clientId, clientSecret, location), + ExpectError: testRequiresImportError("azurerm_kubernetes_cluster_node_pool"), + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_virtualNetworkManual(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) 
}, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_virtualNetworkManual(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_windows(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster_node_pool.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_windows(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesClusterNodePool_windowsAndLinux(t *testing.T) { + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_windowsAndLinux(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.linux"), + testCheckAzureRMKubernetesNodePoolExists("azurerm_kubernetes_cluster_node_pool.windows"), + ), + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.linux", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "azurerm_kubernetes_cluster_node_pool.windows", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testCheckAzureRMKubernetesClusterNodePoolDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_kubernetes_cluster_node_pool" { + continue + } + + name := rs.Primary.Attributes["name"] + kubernetesClusterId := rs.Primary.Attributes["kubernetes_cluster_id"] + parsedK8sId, err := containers.ParseKubernetesClusterID(kubernetesClusterId) + if err != nil { + return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err) + } + + client := testAccProvider.Meta().(*ArmClient).Containers.AgentPoolsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name) + + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Managed Kubernetes Cluster still exists:\n%#v", resp) + } + } + + return nil +} + +func testCheckAzureRMKubernetesNodePoolExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + name := rs.Primary.Attributes["name"] + 
kubernetesClusterId := rs.Primary.Attributes["kubernetes_cluster_id"] + parsedK8sId, err := containers.ParseKubernetesClusterID(kubernetesClusterId) + if err != nil { + return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err) + } + + client := testAccProvider.Meta().(*ArmClient).Containers.AgentPoolsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + agent_pool, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name) + if err != nil { + return fmt.Errorf("Bad: Get on kubernetesClustersClient: %+v", err) + } + + if agent_pool.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) does not exist", name, parsedK8sId.Name, parsedK8sId.ResourceGroup) + } + + return nil + } +} + +func testAccAzureRMKubernetesClusterNodePool_autoScale(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + enable_auto_scaling = true + min_count = 1 + max_count = 3 +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_autoScaleNodeCount(rInt int, clientId, clientSecret, location string, min int, max int) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + enable_auto_scaling = true + min_count = %d + max_count = %d +} +`, template, min, max) +} + +func testAccAzureRMKubernetesClusterNodePool_availabilitySet(rInt int, clientId, clientSecret, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + type = "AvailabilitySet" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesClusterNodePool_availabilityZones(rInt int, clientId, clientSecret, location string) string { + return fmt.Sprintf(` + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.1.0.0/24" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = 
azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "azure" + load_balancer_sku = "Standard" + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + availability_zones = ["1"] + vnet_subnet_id = azurerm_subnet.test.id +} +`, rInt, location, rInt, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesClusterNodePool_manualScale(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePools(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "first" { + name = "first" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 +} + +resource "azurerm_kubernetes_cluster_node_pool" "second" { + name = "second" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_F2s_v2" + node_count = 1 +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsNodeCount(rInt int, clientId, clientSecret, location string, numberOfAgents int) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "first" { + name = "first" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = %d +} + +resource "azurerm_kubernetes_cluster_node_pool" "second" { + name = "second" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_F2s_v2" + node_count = %d +} +`, template, numberOfAgents, numberOfAgents) +} + +func testAccAzureRMKubernetesClusterNodePool_manualScaleNodeCount(rInt int, clientId, clientSecret, location string, numberOfAgents int) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = %d +} +`, template, numberOfAgents) +} + +func testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(rInt int, clientId, clientSecret, location, sku string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "%s" + node_count = 1 +} +`, template, sku) +} + +func 
testAccAzureRMKubernetesClusterNodePool_multiplePools(rInt int, clientId, clientSecret, location string, numberOfAgents int) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "autoscale" { + name = "autoscale" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + enable_auto_scaling = true + min_count = 1 + max_count = 3 +} + +resource "azurerm_kubernetes_cluster_node_pool" "manual" { + name = "manual" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_F2s_v2" + node_count = %d +} +`, template, numberOfAgents) +} + +func testAccAzureRMKubernetesClusterNodePool_nodePublicIP(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + enable_node_public_ip = true +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_nodeTaints(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + node_taints = [ + "key=value:NoSchedule" + ] +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_requiresImport(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_manualScale(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "import" { + name = azurerm_kubernetes_cluster_node_pool.test.name + kubernetes_cluster_id = azurerm_kubernetes_cluster_node_pool.test.kubernetes_cluster_id + vm_size = azurerm_kubernetes_cluster_node_pool.test.vm_size + node_count = azurerm_kubernetes_cluster_node_pool.test.node_count +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_template(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + os_disk_size_gb = 100 +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_templateVirtualNetwork(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + enable_auto_scaling = true + min_count = 1 + max_count = 3 + vnet_subnet_id = azurerm_subnet.test.id +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_virtualNetworkManual(rInt int, clientId, clientSecret, location string) string { + template := 
testAccAzureRMKubernetesClusterNodePool_templateVirtualNetwork(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + vnet_subnet_id = azurerm_subnet.test.id +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_windows(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_templateWindows(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "windoz" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + os_type = "Windows" +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_windowsAndLinux(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesClusterNodePool_templateWindows(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "linux" { + name = "linux" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 +} + +resource "azurerm_kubernetes_cluster_node_pool" "windows" { + name = "windoz" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + os_type = "Windows" +} +`, template) +} + +func testAccAzureRMKubernetesClusterNodePool_template(rInt int, clientId, clientSecret, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesClusterNodePool_templateVirtualNetwork(rInt int, clientId, clientSecret, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_route_table" "test" { + name = "acctestrt-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + route { + name = "akc-route-%d" + address_prefix = "10.100.0.0/14" + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = "10.10.1.1" + } +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.1.0.0/24" + + # TODO: remove in 2.0 + lifecycle { + ignore_changes = ["route_table_id"] + } +} + +resource "azurerm_subnet_route_table_association" "test" { + subnet_id = azurerm_subnet.test.id + route_table_id = azurerm_route_table.test.id +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = 
azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesClusterNodePool_templateWindows(rInt int, clientId, clientSecret, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + windows_profile { + admin_username = "azureuser" + admin_password = "P@55W0rd1234!" + } + + network_profile { + network_plugin = "azure" + network_policy = "azure" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} diff --git a/website/azurerm.erb b/website/azurerm.erb index b1db55962d9f..eb00f451d17b 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -856,6 +856,10 @@
     azurerm_kubernetes_cluster
+
+    azurerm_kubernetes_cluster_node_pool
+
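To illustrate the behaviour of the `ParseKubernetesNodePoolID` helper introduced in `nodepool_id.go` above, here is a minimal unit-test-style sketch. It is not part of this changeset; the test name and its placement alongside the helper in the `containers` package are assumptions, and the ID follows the format shown in the import documentation below.

```go
package containers

import "testing"

// TestParseKubernetesNodePoolID is an illustrative sketch of how the helper
// splits a Node Pool Resource ID into its Resource Group, Cluster and Pool names.
func TestParseKubernetesNodePoolID(t *testing.T) {
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ContainerService/managedClusters/cluster1/agentPools/pool1"

	parsed, err := ParseKubernetesNodePoolID(id)
	if err != nil {
		t.Fatalf("expected %q to parse but it didn't: %+v", id, err)
	}

	if parsed.ResourceGroup != "group1" {
		t.Errorf("expected ResourceGroup to be %q but got %q", "group1", parsed.ResourceGroup)
	}
	if parsed.ClusterName != "cluster1" {
		t.Errorf("expected ClusterName to be %q but got %q", "cluster1", parsed.ClusterName)
	}
	if parsed.Name != "pool1" {
		t.Errorf("expected Name to be %q but got %q", "pool1", parsed.Name)
	}
}
```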
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index f02a0c1a7803..6887b2ad48eb 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -2,7 +2,7 @@
 subcategory: "Container"
 layout: "azurerm"
 page_title: "Azure Resource Manager: azurerm_kubernetes_cluster"
-sidebar_current: "docs-azurerm-resource-container-kubernetes-cluster"
+sidebar_current: "docs-azurerm-resource-container-kubernetes-cluster-x"
 description: |-
   Manages a managed Kubernetes Cluster (also known as AKS / Azure Kubernetes Service)
 ---
@@ -166,7 +166,7 @@ A `agent_pool_profile` block supports the following:
 
 * `vm_size` - (Required) The size of each VM in the Agent Pool (e.g. `Standard_F1`). Changing this forces a new resource to be created.
 
-* `availability_zones` - (Optional) Availability zones for nodes. The property `type` of the `agent_pool_profile` must be set to `VirtualMachineScaleSets` in order to use availability zones. 
+* `availability_zones` - (Optional) Availability zones for nodes. The property `type` of the `agent_pool_profile` must be set to `VirtualMachineScaleSets` in order to use availability zones.
 
 * `enable_auto_scaling` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Note that auto scaling feature requires the that the `type` is set to `VirtualMachineScaleSets`
 
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
new file mode 100644
index 000000000000..b70d3db4fba9
--- /dev/null
+++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -0,0 +1,114 @@
+---
+subcategory: "Container"
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_kubernetes_cluster_node_pool"
+sidebar_current: "docs-azurerm-resource-container-kubernetes-cluster-node-pool"
+description: |-
+  Manages a Node Pool within a Kubernetes Cluster
+---
+
+# azurerm_kubernetes_cluster_node_pool
+
+Manages a Node Pool within a Kubernetes Cluster
+
+~> **NOTE:** Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "example" {
+  name     = "example-resources"
+  location = "West Europe"
+}
+
+resource "azurerm_kubernetes_cluster" "example" {
+  name                = "example-aks1"
+  location            = azurerm_resource_group.example.location
+  resource_group_name = azurerm_resource_group.example.name
+  dns_prefix          = "exampleaks1"
+
+  default_node_pool {
+    name       = "default"
+    node_count = 1
+    vm_size    = "Standard_D2_v2"
+  }
+
+  service_principal {
+    client_id     = "00000000-0000-0000-0000-000000000000"
+    client_secret = "00000000000000000000000000000000"
+  }
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "example" {
+  name                  = "internal"
+  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+  vm_size               = "Standard_DS2_v2"
+  node_count            = 1
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created.
+
+-> **NOTE:** A Windows Node Pool cannot have a `name` longer than 6 characters.
+
+* `kubernetes_cluster_id` - (Required) The ID of the Kubernetes Cluster where this Node Pool should exist. Changing this forces a new resource to be created.
+
+* `vm_size` - (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
+
+---
+
+* `availability_zones` - (Optional) A list of Availability Zones in which the Nodes in this Node Pool should be created.
+
+* `enable_auto_scaling` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Defaults to `false`.
+
+-> **NOTE:** Additional fields must be configured depending on the value of this field - see below.
+
+* `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? Defaults to `false`.
+
+* `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created.
+
+* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g. `key=value:NoSchedule`).
+
+* `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
+
+* `os_type` - (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
+
+* `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created.
+
+~> **NOTE:** A route table must be configured on this Subnet.
+
+---
+
+When `enable_auto_scaling` is set to `true` the following fields are applicable:
+
+* `max_count` - (Required) The maximum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be greater than or equal to `min_count`.
+
+* `min_count` - (Required) The minimum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be less than or equal to `max_count`.
+
+* `node_count` - (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be a value in the range `min_count` - `max_count`.
+
+-> **NOTE:** If you're specifying an initial number of nodes you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to this field.
+
+When `enable_auto_scaling` is set to `false` the following fields are applicable:
+
+* `node_count` - (Required) The number of nodes which should exist within this Node Pool. Valid values are between `1` and `100`.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the Kubernetes Cluster Node Pool.
+
+---
+
+## Import
+
+Kubernetes Cluster Node Pools can be imported using the `resource id`, e.g.
+
+```shell
+terraform import azurerm_kubernetes_cluster_node_pool.pool1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ContainerService/managedClusters/cluster1/agentPools/pool1
+```
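As a usage sketch for the arguments documented above (illustrative only, not part of this changeset; it builds on the `azurerm_kubernetes_cluster.example` resource from the Example Usage section), an auto-scaled Node Pool which ignores post-creation changes to `node_count`, as suggested in the note on `node_count`, could look like this:

```hcl
resource "azurerm_kubernetes_cluster_node_pool" "autoscaled" {
  name                  = "autoscaled"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"

  enable_auto_scaling = true
  min_count           = 1
  max_count           = 3

  # the initial number of nodes - changes made afterwards by the cluster
  # auto-scaler are ignored so they don't show up as a diff on the next plan
  node_count = 1

  lifecycle {
    ignore_changes = ["node_count"]
  }
}
```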