Aks autoscale (#3361)
jlpedrosa authored and katbyte committed Jul 3, 2019
1 parent 973d7a1 commit 6f8adc9
Showing 7 changed files with 340 additions and 3 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -42,3 +42,6 @@ examples/**/test.tf
examples/**/test.tfvars
examples/**/terraform
examples/**/terraform.zip

# never upload the build to git
terraform-provider-azurerm
37 changes: 37 additions & 0 deletions azurerm/data_source_kubernetes_cluster.go
@@ -87,6 +87,29 @@ func dataSourceArmKubernetesCluster() *schema.Resource {
Computed: true,
},

"max_count": {
Type: schema.TypeInt,
Computed: true,
},

"min_count": {
Type: schema.TypeInt,
Computed: true,
},

"enable_auto_scaling": {
Type: schema.TypeBool,
Computed: true,
},

"availability_zones": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},

// TODO: remove this in a future version
"dns_prefix": {
Type: schema.TypeString,
@@ -565,6 +588,20 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi
agentPoolProfile["count"] = int(*profile.Count)
}

if profile.MinCount != nil {
agentPoolProfile["min_count"] = int(*profile.MinCount)
}

if profile.MaxCount != nil {
agentPoolProfile["max_count"] = int(*profile.MaxCount)
}

if profile.EnableAutoScaling != nil {
agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling
}

agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones)

if profile.Name != nil {
agentPoolProfile["name"] = *profile.Name
}
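For reference, with these attributes flattened into state, a configuration can read a pool's autoscaling settings straight from the data source. A minimal sketch in the 0.11-style interpolation syntax the test fixtures below use (the cluster and resource group names are placeholders, not part of this commit):

data "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"       # placeholder
  resource_group_name = "example-resources" # placeholder
}

# min_count, max_count, enable_auto_scaling and availability_zones are all
# exported as Computed attributes on agent_pool_profile.
output "auto_scaling_enabled" {
  value = "${data.azurerm_kubernetes_cluster.example.agent_pool_profile.0.enable_auto_scaling}"
}

output "node_count_bounds" {
  value = "${data.azurerm_kubernetes_cluster.example.agent_pool_profile.0.min_count}-${data.azurerm_kubernetes_cluster.example.agent_pool_profile.0.max_count}"
}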
82 changes: 82 additions & 0 deletions azurerm/data_source_kubernetes_cluster_test.go
@@ -417,6 +417,64 @@ func TestAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting(t *testing.T)
})
}

func TestAccDataSourceAzureRMKubernetesCluster_autoscalingNoAvailabilityZones(t *testing.T) {
dataSourceName := "data.azurerm_kubernetes_cluster.test"
ri := tf.AccRandTimeInt()
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")

config := testAccDataSourceAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(ri, clientId, clientSecret, testLocation())

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(dataSourceName),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.min_count", "1"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.max_count", "2"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.enable_auto_scaling", "true"),
resource.TestCheckNoResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones"),
),
},
},
})
}

func TestAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones(t *testing.T) {
dataSourceName := "data.azurerm_kubernetes_cluster.test"
ri := tf.AccRandTimeInt()
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")

config := testAccDataSourceAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(ri, clientId, clientSecret, testLocation())

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(dataSourceName),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.min_count", "1"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.max_count", "2"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.enable_auto_scaling", "true"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones.#", "2"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones.0", "1"),
resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones.1", "2"),
),
},
},
})
}

func testAccDataSourceAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string {
r := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location)
return fmt.Sprintf(`
@@ -584,3 +642,27 @@ data "azurerm_kubernetes_cluster" "test" {
}
`, r)
}

func testAccDataSourceAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string {
r := testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt, clientId, clientSecret, location)
return fmt.Sprintf(`
%s
data "azurerm_kubernetes_cluster" "test" {
name = "${azurerm_kubernetes_cluster.test.name}"
resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}"
}
`, r)
}

func testAccDataSourceAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string {
r := testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(rInt, clientId, clientSecret, location)
return fmt.Sprintf(`
%s
data "azurerm_kubernetes_cluster" "test" {
name = "${azurerm_kubernetes_cluster.test.name}"
resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}"
}
`, r)
}
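
The base configurations these helpers wrap, testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones and testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones, live in the resource test file (one of the seven changed files, not shown on this page). Judging from the attributes the checks above assert, each fixture configures an agent_pool_profile along these lines (a sketch, not the committed fixture):

agent_pool_profile {
  name                = "default"
  vm_size             = "Standard_DS2_v2" # assumed; not asserted by the checks
  type                = "VirtualMachineScaleSets"
  enable_auto_scaling = true
  min_count           = 1
  max_count           = 2
  availability_zones  = ["1", "2"] # omitted in the NoAvailabilityZones variant
}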
77 changes: 74 additions & 3 deletions azurerm/resource_arm_kubernetes_cluster.go
@@ -119,6 +119,31 @@ func resourceArmKubernetesCluster() *schema.Resource {
ValidateFunc: validation.IntBetween(1, 100),
},

"max_count": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntBetween(1, 100),
},

"min_count": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntBetween(1, 100),
},

"enable_auto_scaling": {
Type: schema.TypeBool,
Optional: true,
},

"availability_zones": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},

// TODO: remove this field in the next major version
"dns_prefix": {
Type: schema.TypeString,
@@ -563,7 +588,11 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter
kubernetesVersion := d.Get("kubernetes_version").(string)

linuxProfile := expandKubernetesClusterLinuxProfile(d)
agentProfiles := expandKubernetesClusterAgentPoolProfiles(d)
agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d)
if err != nil {
return err
}

servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d)
networkProfile := expandKubernetesClusterNetworkProfile(d)
addonProfiles := expandKubernetesClusterAddonProfiles(d)
@@ -902,7 +931,8 @@ func flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice.
return []interface{}{values}
}

func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) []containerservice.ManagedClusterAgentPoolProfile {
func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) ([]containerservice.ManagedClusterAgentPoolProfile, error) {

configs := d.Get("agent_pool_profile").([]interface{})

profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0)
@@ -933,10 +963,37 @@ func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) []containe
if vnetSubnetID != "" {
profile.VnetSubnetID = utils.String(vnetSubnetID)
}

if maxCount := int32(config["max_count"].(int)); maxCount > 0 {
profile.MaxCount = utils.Int32(maxCount)
}

if minCount := int32(config["min_count"].(int)); minCount > 0 {
profile.MinCount = utils.Int32(minCount)
}

if enableAutoScalingItf := config["enable_auto_scaling"]; enableAutoScalingItf != nil {
profile.EnableAutoScaling = utils.Bool(enableAutoScalingItf.(bool))

// Autoscaling changes the number of nodes, so the original count should not be sent again on updates.
// This avoids the cluster being resized back to its initial size after creation.
if *profile.EnableAutoScaling && !d.IsNewResource() {
profile.Count = nil
}
}

if availabilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availabilityZones) > 0 {
profile.AvailabilityZones = availabilityZones
}

if profile.EnableAutoScaling != nil && *profile.EnableAutoScaling && (profile.MinCount == nil || profile.MaxCount == nil) {
return nil, fmt.Errorf("`min_count` and `max_count` must be set when `enable_auto_scaling` is enabled")
}

profiles = append(profiles, profile)
}

return profiles
return profiles, nil
}

func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.ManagedClusterAgentPoolProfile, fqdn *string) []interface{} {
@@ -957,6 +1014,20 @@ func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.Mana
agentPoolProfile["count"] = int(*profile.Count)
}

if profile.MinCount != nil {
agentPoolProfile["min_count"] = int(*profile.MinCount)
}

if profile.MaxCount != nil {
agentPoolProfile["max_count"] = int(*profile.MaxCount)
}

if profile.EnableAutoScaling != nil {
agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling
}

agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones)

if fqdn != nil {
// temporarily persist the parent FQDN here until `fqdn` is removed from the `agent_pool_profile`
agentPoolProfile["fqdn"] = *fqdn
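Putting the schema, expand, and flatten changes together, enabling the AKS autoscaler from a configuration would look roughly like the sketch below. This is a hedged illustration rather than an example from this commit; all names and variables are placeholders, and the validation added in expandKubernetesClusterAgentPoolProfiles requires min_count and max_count whenever enable_auto_scaling is set.

resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"       # placeholder
  location            = "West Europe"       # placeholder
  resource_group_name = "example-resources" # placeholder
  dns_prefix          = "exampleaks"

  agent_pool_profile {
    name                = "default"
    vm_size             = "Standard_DS2_v2"
    type                = "VirtualMachineScaleSets" # autoscaling requires VMSS-backed pools
    count               = 1
    enable_auto_scaling = true
    min_count           = 1 # required together with max_count when autoscaling is enabled
    max_count           = 5
    availability_zones  = ["1", "2"]
  }

  service_principal {
    client_id     = "${var.client_id}"     # placeholder variable
    client_secret = "${var.client_secret}" # placeholder variable
  }
}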
