Adding upgrade settings to all node pool blocks #10376

Merged
@@ -215,6 +215,8 @@ func dataSourceKubernetesCluster() *schema.Resource {
Type: schema.TypeBool,
Computed: true,
},

"upgrade_settings": upgradeSettingsForDataSourceSchema(),
},
},
},
@@ -870,75 +872,92 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi
}

for _, profile := range *input {
agentPoolProfile := make(map[string]interface{})

if profile.Type != "" {
agentPoolProfile["type"] = string(profile.Type)
}

count := 0
if profile.Count != nil {
agentPoolProfile["count"] = int(*profile.Count)
count = int(*profile.Count)
}

minCount := 0
if profile.MinCount != nil {
agentPoolProfile["min_count"] = int(*profile.MinCount)
minCount = int(*profile.MinCount)
}

maxCount := 0
if profile.MaxCount != nil {
agentPoolProfile["max_count"] = int(*profile.MaxCount)
maxCount = int(*profile.MaxCount)
}

enableAutoScaling := false
if profile.EnableAutoScaling != nil {
agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling
enableAutoScaling = *profile.EnableAutoScaling
}

agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones)

name := ""
if profile.Name != nil {
agentPoolProfile["name"] = *profile.Name
}

if profile.VMSize != "" {
agentPoolProfile["vm_size"] = string(profile.VMSize)
name = *profile.Name
}

osDiskSizeGb := 0
if profile.OsDiskSizeGB != nil {
agentPoolProfile["os_disk_size_gb"] = int(*profile.OsDiskSizeGB)
osDiskSizeGb = int(*profile.OsDiskSizeGB)
}

vnetSubnetId := ""
if profile.VnetSubnetID != nil {
agentPoolProfile["vnet_subnet_id"] = *profile.VnetSubnetID
}

if profile.OsType != "" {
agentPoolProfile["os_type"] = string(profile.OsType)
vnetSubnetId = *profile.VnetSubnetID
}

orchestratorVersion := ""
if profile.OrchestratorVersion != nil && *profile.OrchestratorVersion != "" {
agentPoolProfile["orchestrator_version"] = *profile.OrchestratorVersion
orchestratorVersion = *profile.OrchestratorVersion
}

maxPods := 0
if profile.MaxPods != nil {
agentPoolProfile["max_pods"] = int(*profile.MaxPods)
maxPods = int(*profile.MaxPods)
}

nodeLabels := make(map[string]string)
if profile.NodeLabels != nil {
agentPoolProfile["node_labels"] = profile.NodeLabels
for k, v := range profile.NodeLabels {
if v == nil {
continue
}

nodeLabels[k] = *v
}
}

nodeTaints := make([]string, 0)
if profile.NodeTaints != nil {
agentPoolProfile["node_taints"] = *profile.NodeTaints
nodeTaints = *profile.NodeTaints
}

enableNodePublicIP := false
if profile.EnableNodePublicIP != nil {
agentPoolProfile["enable_node_public_ip"] = *profile.EnableNodePublicIP
enableNodePublicIP = *profile.EnableNodePublicIP
}

if profile.Tags != nil {
agentPoolProfile["tags"] = tags.Flatten(profile.Tags)
}

agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile)
agentPoolProfiles = append(agentPoolProfiles, map[string]interface{}{
"availability_zones": utils.FlattenStringSlice(profile.AvailabilityZones),
"count": count,
"enable_auto_scaling": enableAutoScaling,
"enable_node_public_ip": enableNodePublicIP,
"max_count": maxCount,
"max_pods": maxPods,
"min_count": minCount,
"name": name,
"node_labels": nodeLabels,
"node_taints": nodeTaints,
"orchestrator_version": orchestratorVersion,
"os_disk_size_gb": osDiskSizeGb,
"os_type": string(profile.OsType),
"tags": tags.Flatten(profile.Tags),
"type": string(profile.Type),
"upgrade_settings": flattenUpgradeSettings(profile.UpgradeSettings),
"vm_size": string(profile.VMSize),
"vnet_subnet_id": vnetSubnetId,
})
}

return agentPoolProfiles
@@ -140,6 +140,8 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource {

"tags": tags.SchemaDataSource(),

"upgrade_settings": upgradeSettingsForDataSourceSchema(),

"vm_size": {
Type: schema.TypeString,
Computed: true,
@@ -276,6 +278,10 @@ func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interf
}
d.Set("spot_max_price", spotMaxPrice)

if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil {
return fmt.Errorf("setting `upgrade_settings`: %+v", err)
}

d.Set("vnet_subnet_id", props.VnetSubnetID)
d.Set("vm_size", string(props.VMSize))
}
@@ -224,6 +224,8 @@ func resourceKubernetesClusterNodePool() *schema.Resource {
ForceNew: true,
ValidateFunc: azure.ValidateResourceID,
},

"upgrade_settings": upgradeSettingsSchema(),
},
}
}
@@ -302,6 +304,7 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf
Type: containerservice.VirtualMachineScaleSets,
VMSize: containerservice.VMSizeTypes(vmSize),
EnableEncryptionAtHost: utils.Bool(enableHostEncryption),
UpgradeSettings: expandUpgradeSettings(d.Get("upgrade_settings").([]interface{})),

// this must always be sent during creation, but is optional for auto-scaled clusters during update
Count: utils.Int32(int32(count)),
@@ -517,6 +520,11 @@ func resourceKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interf
props.Tags = tags.Expand(t)
}

if d.HasChange("upgrade_settings") {
upgradeSettingsRaw := d.Get("upgrade_settings").([]interface{})
props.UpgradeSettings = expandUpgradeSettings(upgradeSettingsRaw)
}

// validate the auto-scale fields are both set/unset to prevent a continual diff
maxCount := 0
if props.MaxCount != nil {
@@ -681,6 +689,10 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac

d.Set("vnet_subnet_id", props.VnetSubnetID)
d.Set("vm_size", string(props.VMSize))

if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil {
return fmt.Errorf("setting `upgrade_settings`: %+v", err)
}
}

return tags.FlattenAndSet(d, resp.Tags)
@@ -707,3 +719,64 @@ func resourceKubernetesClusterNodePoolDelete(d *schema.ResourceData, meta interf

return nil
}

func upgradeSettingsSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"max_surge": {
Type: schema.TypeString,
Required: true,
},
},
},
}
}
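// Reviewer note, not part of the diff: `max_surge` is modelled as a string rather than an
// integer because the AKS API accepts either a fixed node count (e.g. "2") or a percentage
// (e.g. "33%") for the surge value, so both forms can pass through unchanged.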

func upgradeSettingsForDataSourceSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"max_surge": {
Type: schema.TypeString,
Computed: true,
},
},
},
}
}

func expandUpgradeSettings(input []interface{}) *containerservice.AgentPoolUpgradeSettings {
setting := &containerservice.AgentPoolUpgradeSettings{}
if len(input) == 0 {
return setting
}

v := input[0].(map[string]interface{})
if maxSurgeRaw := v["max_surge"].(string); maxSurgeRaw != "" {
setting.MaxSurge = utils.String(maxSurgeRaw)
}
return setting
}

func flattenUpgradeSettings(input *containerservice.AgentPoolUpgradeSettings) []interface{} {
maxSurge := ""
if input != nil && input.MaxSurge != nil {
maxSurge = *input.MaxSurge
}

if maxSurge == "" {
return []interface{}{}
}

return []interface{}{
map[string]interface{}{
"max_surge": maxSurge,
},
}
}
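A minimal sketch (a reviewer illustration, not part of this diff) of how the two helpers above round-trip a value, assuming it is called from within this package:

// An empty `upgrade_settings` list expands to a non-nil, empty struct (MaxSurge stays nil),
// and flattening a nil or empty MaxSurge collapses back to an empty list, so removing the
// block from configuration should not leave a perpetual diff.
cleared := expandUpgradeSettings([]interface{}{})
_ = flattenUpgradeSettings(cleared) // []interface{}{}

// A populated block carries `max_surge` through both directions unchanged.
set := expandUpgradeSettings([]interface{}{
	map[string]interface{}{"max_surge": "2"},
})
_ = flattenUpgradeSettings(set) // [{"max_surge": "2"}]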
@@ -42,6 +42,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){
"osDiskType": testAccKubernetesClusterNodePool_osDiskType,
"modeSystem": testAccKubernetesClusterNodePool_modeSystem,
"modeUpdate": testAccKubernetesClusterNodePool_modeUpdate,
"upgradeSettings": testAccKubernetesClusterNodePool_upgradeSettings,
"virtualNetworkAutomatic": testAccKubernetesClusterNodePool_virtualNetworkAutomatic,
"virtualNetworkManual": testAccKubernetesClusterNodePool_virtualNetworkManual,
"windows": testAccKubernetesClusterNodePool_windows,
@@ -596,6 +597,45 @@ func testAccKubernetesClusterNodePool_spot(t *testing.T) {
})
}

func TestAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccKubernetesClusterNodePool_upgradeSettings(t)
}

func testAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
r := KubernetesClusterNodePoolResource{}

data.ResourceTest(t, r, []resource.TestStep{
{
Config: r.upgradeSettingsConfig(data, "2"),
Check: resource.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"),
),
},
data.ImportStep(),
{
Config: r.upgradeSettingsConfig(data, "4"),
Check: resource.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("4"),
),
},
data.ImportStep(),
{
Config: r.upgradeSettingsConfig(data, ""),
Check: resource.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("0"),
),
},
data.ImportStep(),
})
}

func TestAccKubernetesClusterNodePool_virtualNetworkAutomatic(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccKubernetesClusterNodePool_virtualNetworkAutomatic(t)
@@ -1349,6 +1389,31 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
`, r.templateConfig(data))
}

func (r KubernetesClusterNodePoolResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string {
template := r.templateConfig(data)
if maxSurge != "" {
maxSurge = fmt.Sprintf(`upgrade_settings {
max_surge = %q
}`, maxSurge)
}

return fmt.Sprintf(`
provider "azurerm" {
features {}
}

%s

resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_DS2_v2"
node_count = 3
%s
}
`, template, maxSurge)
}
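// Reviewer note, not part of the diff: when maxSurge is empty, upgradeSettingsConfig omits the
// upgrade_settings block from the rendered configuration entirely, which is what the final step
// of testAccKubernetesClusterNodePool_upgradeSettings above relies on to confirm that removing
// the block resets `upgrade_settings` to an empty list.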

func (r KubernetesClusterNodePoolResource) virtualNetworkAutomaticConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
@@ -140,3 +140,40 @@ resource "azurerm_kubernetes_cluster" "test" {
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion)
}

func (r KubernetesClusterResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string {
if maxSurge != "" {
maxSurge = fmt.Sprintf(`upgrade_settings {
max_surge = %q
}`, maxSurge)
}

return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%d"
location = "%s"
}

resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%d"

default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
%s
}

identity {
type = "SystemAssigned"
}
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, maxSurge)
}