Adding upgrade settings to all node pool blocks
Co-authored-by: Even Holthe <even.holthe@me.com>
jmcshane and evenh committed Feb 15, 2021
1 parent e646c55 commit 0ea3e92
Showing 9 changed files with 154 additions and 0 deletions.
@@ -215,6 +215,8 @@ func dataSourceKubernetesCluster() *schema.Resource {
Type: schema.TypeBool,
Computed: true,
},

"upgrade_settings": upgradeSettingsSchema(),
},
},
},
@@ -938,6 +940,10 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi
agentPoolProfile["tags"] = tags.Flatten(profile.Tags)
}

if profile.UpgradeSettings != nil {
    agentPoolProfile["upgrade_settings"] = flattenUpgradeSettings(profile.UpgradeSettings)
}

agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile)
}

@@ -140,6 +140,8 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource {

"tags": tags.SchemaDataSource(),

"upgrade_settings": upgradeSettingsSchema(),

"vm_size": {
Type: schema.TypeString,
Computed: true,
@@ -276,6 +278,8 @@ func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interf
}
d.Set("spot_max_price", spotMaxPrice)

d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings))

d.Set("vnet_subnet_id", props.VnetSubnetID)
d.Set("vm_size", string(props.VMSize))
}
@@ -224,6 +224,8 @@ func resourceKubernetesClusterNodePool() *schema.Resource {
ForceNew: true,
ValidateFunc: azure.ValidateResourceID,
},

"upgrade_settings": upgradeSettingsSchema(),
},
}
}
@@ -365,6 +367,10 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf
profile.VnetSubnetID = utils.String(vnetSubnetID)
}

if upgradeSettingsRaw, ok := d.Get("upgrade_settings").([]interface{}); ok && len(upgradeSettingsRaw) > 0 {
    profile.UpgradeSettings = expandUpgradeSettings(upgradeSettingsRaw)
}

maxCount := d.Get("max_count").(int)
minCount := d.Get("min_count").(int)

@@ -517,6 +523,11 @@ func resourceKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interf
props.Tags = tags.Expand(t)
}

if d.HasChange("upgrade_settings") {
upgradeSettingsRaw := d.Get("upgrade_settings").([]interface{})
props.UpgradeSettings = expandUpgradeSettings(upgradeSettingsRaw)
}

// validate the auto-scale fields are both set/unset to prevent a continual diff
maxCount := 0
if props.MaxCount != nil {
@@ -681,6 +692,8 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac

d.Set("vnet_subnet_id", props.VnetSubnetID)
d.Set("vm_size", string(props.VMSize))

d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings))
}

return tags.FlattenAndSet(d, resp.Tags)
@@ -707,3 +720,43 @@ func resourceKubernetesClusterNodePoolDelete(d *schema.ResourceData, meta interf

return nil
}

// upgradeSettingsSchema returns the schema for the upgrade_settings block, which
// is shared between the default node pool, the node pool resource and the data sources.
func upgradeSettingsSchema() *schema.Schema {
    return &schema.Schema{
        Type:     schema.TypeList,
        Optional: true,
        MaxItems: 1,
        Elem: &schema.Resource{
            Schema: map[string]*schema.Schema{
                "max_surge": {
                    Type:     schema.TypeString,
                    Optional: true,
                },
            },
        },
    }
}

// expandUpgradeSettings maps the upgrade_settings block onto the SDK's
// AgentPoolUpgradeSettings type; an empty list means the block was not set.
func expandUpgradeSettings(input []interface{}) *containerservice.AgentPoolUpgradeSettings {
    if len(input) == 0 {
        return nil
    }

    upgradeSettingInput := input[0].(map[string]interface{})
    upgradeSetting := containerservice.AgentPoolUpgradeSettings{}

    if maxSurgeRaw := upgradeSettingInput["max_surge"].(string); maxSurgeRaw != "" {
        upgradeSetting.MaxSurge = utils.String(maxSurgeRaw)
    }

    return &upgradeSetting
}

// flattenUpgradeSettings maps the SDK type back onto the upgrade_settings block.
func flattenUpgradeSettings(input *containerservice.AgentPoolUpgradeSettings) []interface{} {
    upgradeSettings := make([]interface{}, 0)

    if input == nil {
        return upgradeSettings
    }

    nodePoolSetting := make(map[string]interface{})

    // MaxSurge is a pointer in the SDK, so guard against nil before dereferencing.
    if input.MaxSurge != nil {
        nodePoolSetting["max_surge"] = *input.MaxSurge
    }

    return append(upgradeSettings, nodePoolSetting)
}
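
Taken together, these three helpers round-trip a single-element list between Terraform state and the SDK's AgentPoolUpgradeSettings struct. As a rough sketch (values illustrative, not from this commit), the schema above accepts a block of this shape:

```hcl
# Sketch of the configuration shape defined by upgradeSettingsSchema().
# At most one block is allowed (MaxItems: 1); max_surge is a plain string,
# so it can carry either a fixed node count ("2") or a percentage ("33%").
# An empty string is dropped by expandUpgradeSettings.
upgrade_settings {
  max_surge = "33%"
}
```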
@@ -42,6 +42,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){
"osDiskType": testAccKubernetesClusterNodePool_osDiskType,
"modeSystem": testAccKubernetesClusterNodePool_modeSystem,
"modeUpdate": testAccKubernetesClusterNodePool_modeUpdate,
"upgradeSettings": testAccKubernetesClusterNodePool_upgradeSettings,
"virtualNetworkAutomatic": testAccKubernetesClusterNodePool_virtualNetworkAutomatic,
"virtualNetworkManual": testAccKubernetesClusterNodePool_virtualNetworkManual,
"windows": testAccKubernetesClusterNodePool_windows,
@@ -596,6 +597,27 @@ func testAccKubernetesClusterNodePool_spot(t *testing.T) {
})
}

func TestAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) {
    checkIfShouldRunTestsIndividually(t)
    testAccKubernetesClusterNodePool_upgradeSettings(t)
}

func testAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) {
    data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
    r := KubernetesClusterNodePoolResource{}

    data.ResourceTest(t, r, []resource.TestStep{
        {
            Config: r.upgradeSettingsConfig(data),
            Check: resource.ComposeTestCheckFunc(
                check.That(data.ResourceName).ExistsInAzure(r),
                check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"),
            ),
        },
        data.ImportStep(),
    })
}

func TestAccKubernetesClusterNodePool_virtualNetworkAutomatic(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccKubernetesClusterNodePool_virtualNetworkAutomatic(t)
@@ -1349,6 +1371,26 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
`, r.templateConfig(data))
}

func (r KubernetesClusterNodePoolResource) upgradeSettingsConfig(data acceptance.TestData) string {
    return fmt.Sprintf(`
provider "azurerm" {
  features {}
}

%s

resource "azurerm_kubernetes_cluster_node_pool" "test" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 3

  upgrade_settings {
    max_surge = "2"
  }
}
`, r.templateConfig(data))
}

func (r KubernetesClusterNodePoolResource) virtualNetworkAutomaticConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
10 changes: 10 additions & 0 deletions azurerm/internal/services/containers/kubernetes_nodepool.go
@@ -165,6 +165,8 @@ func SchemaDefaultNodePool() *schema.Schema {
Optional: true,
ForceNew: true,
},

"upgrade_settings": upgradeSettingsSchema(),
},
},
}
@@ -197,6 +199,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
NodeLabels: defaultCluster.NodeLabels,
NodeTaints: defaultCluster.NodeTaints,
Tags: defaultCluster.Tags,
UpgradeSettings: defaultCluster.UpgradeSettings,
},
}
}
@@ -281,6 +284,10 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC
profile.ProximityPlacementGroupID = utils.String(proximityPlacementGroupId)
}

if upgradeSettingsRaw, ok := raw["upgrade_settings"].([]interface{}); ok && len(upgradeSettingsRaw) > 0 {
    profile.UpgradeSettings = expandUpgradeSettings(upgradeSettingsRaw)
}

count := raw["node_count"].(int)
maxCount := raw["max_count"].(int)
minCount := raw["min_count"].(int)
@@ -430,6 +437,8 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
proximityPlacementGroupId = *agentPool.ProximityPlacementGroupID
}

upgradeSettings := flattenUpgradeSettings(agentPool.UpgradeSettings)

return &[]interface{}{
map[string]interface{}{
"availability_zones": availabilityZones,
"vm_size": string(agentPool.VMSize),
"orchestrator_version": orchestratorVersion,
"proximity_placement_group_id": proximityPlacementGroupId,
"upgrade_settings": upgradeSettings,
"vnet_subnet_id": vnetSubnetId,
"only_critical_addons_enabled": criticalAddonsEnabled,
},
9 changes: 9 additions & 0 deletions website/docs/d/kubernetes_cluster.html.markdown
@@ -126,12 +126,21 @@ A `agent_pool_profile` block exports the following:

* `orchestrator_version` - Kubernetes version used for the Agents.

* `upgrade_settings` - An `upgrade_settings` block as documented below.

* `vm_size` - The size of each VM in the Agent Pool (e.g. `Standard_F1`).

* `vnet_subnet_id` - The ID of the Subnet where the Agents in the Pool are provisioned.

---

An `upgrade_settings` block exports the following:

* `max_surge` - The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.
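
As an illustrative sketch (resource and output names assumed, not taken from this commit), the setting can be surfaced from the data source like so:

```hcl
data "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  resource_group_name = "example-resources"
}

# Surfaces the max_surge value of the first agent pool profile.
output "default_pool_max_surge" {
  value = data.azurerm_kubernetes_cluster.example.agent_pool_profile[0].upgrade_settings[0].max_surge
}
```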

---

An `azure_active_directory` block exports the following:

* `admin_group_object_ids` - The list of Object IDs of Azure Active Directory Groups which have Admin Role on the Cluster (when using a Managed integration).
9 changes: 9 additions & 0 deletions website/docs/d/kubernetes_cluster_node_pool.html.markdown
@@ -78,10 +78,19 @@ In addition to the Arguments listed above - the following Attributes are exporte

* `tags` - A mapping of tags assigned to the Kubernetes Cluster Node Pool.

* `upgrade_settings` - An `upgrade_settings` block as documented below.

* `vm_size` - The size of the Virtual Machines used in the Virtual Machine Scale Set backing this Node Pool.

* `vnet_subnet_id` - The ID of the Subnet in which this Node Pool exists.

---

An `upgrade_settings` block exports the following:

* `max_surge` - The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.
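
A minimal sketch of reading this back (names assumed for illustration):

```hcl
data "azurerm_kubernetes_cluster_node_pool" "example" {
  name                    = "internal"
  kubernetes_cluster_name = "example-aks"
  resource_group_name     = "example-resources"
}

# Surfaces the node pool's max_surge value.
output "node_pool_max_surge" {
  value = data.azurerm_kubernetes_cluster_node_pool.example.upgrade_settings[0].max_surge
}
```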


## Timeouts

The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
10 changes: 10 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
@@ -301,6 +301,8 @@ A `default_node_pool` block supports the following:

~> At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) until this is fixed in the AKS API.

* `upgrade_settings` - (Optional) An `upgrade_settings` block as documented below.

* `vnet_subnet_id` - (Optional) The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.

~> **NOTE:** A Route Table must be configured on this Subnet.
@@ -446,6 +448,14 @@ A `windows_profile` block supports the following:

* `admin_password` - (Required) The Admin Password for Windows VMs. Length must be between 14 and 123 characters.

---

An `upgrade_settings` block supports the following:

* `max_surge` - (Optional) The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.

-> **Note:** If a percentage is provided, the number of surge nodes is calculated from the `node_count` value on the current cluster. Node surge can allow a cluster to have more nodes than `max_count` during an upgrade. Ensure that your cluster has enough [IP space](https://docs.microsoft.com/en-us/azure/aks/upgrade-cluster#customize-node-surge-upgrade) during an upgrade.
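
A minimal sketch of a percentage-based surge on the default node pool (all names and sizes illustrative, not part of this commit):

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    vm_size    = "Standard_DS2_v2"
    node_count = 3

    # With node_count = 3, "33%" rounds up to one additional surge node.
    upgrade_settings {
      max_surge = "33%"
    }
  }

  identity {
    type = "SystemAssigned"
  }
}
```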


## Attributes Reference

11 changes: 11 additions & 0 deletions website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -117,6 +117,8 @@ The following arguments are supported:

~> At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) until this is fixed in the AKS API.

* `upgrade_settings` - (Optional) An `upgrade_settings` block as documented below.

* `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist.

-> **NOTE:** At this time the `vnet_subnet_id` must be the same for all node pools in the cluster
@@ -139,6 +141,15 @@ If `enable_auto_scaling` is set to `false`, then the following fields can also b

* `node_count` - (Required) The number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000`.

---

An `upgrade_settings` block supports the following:

* `max_surge` - (Optional) The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.

-> **Note:** If a percentage is provided, the number of surge nodes is calculated from the current node count on the cluster. Node surge can allow a cluster to have more nodes than `max_count` during an upgrade. Ensure that your cluster has enough [IP space](https://docs.microsoft.com/en-us/azure/aks/upgrade-cluster#customize-node-surge-upgrade) during an upgrade.
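
For instance, a percentage-based value, in contrast to the fixed count used in the acceptance test above (names and sizes illustrative):

```hcl
resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 4

  # "50%" of node_count = 4 permits up to two extra nodes during an upgrade.
  upgrade_settings {
    max_surge = "50%"
  }
}
```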


## Attributes Reference

The following attributes are exported:
