r/kubernetes_cluster: fixing pr comments
tombuildsstuff committed Feb 16, 2021
1 parent 0ea3e92 commit 540d4f3
Showing 10 changed files with 206 additions and 66 deletions.
@@ -216,7 +216,7 @@ func dataSourceKubernetesCluster() *schema.Resource {
                         Computed: true,
                     },
 
-                    "upgrade_settings": upgradeSettingsSchema(),
+                    "upgrade_settings": upgradeSettingsForDataSourceSchema(),
                 },
             },
         },
@@ -872,79 +872,107 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi
     }
 
     for _, profile := range *input {
-        agentPoolProfile := make(map[string]interface{})
-
+        agentType := ""
         if profile.Type != "" {
-            agentPoolProfile["type"] = string(profile.Type)
+            agentType = string(profile.Type)
         }
 
+        count := 0
         if profile.Count != nil {
-            agentPoolProfile["count"] = int(*profile.Count)
+            count = int(*profile.Count)
         }
 
+        minCount := 0
         if profile.MinCount != nil {
-            agentPoolProfile["min_count"] = int(*profile.MinCount)
+            minCount = int(*profile.MinCount)
         }
 
+        maxCount := 0
         if profile.MaxCount != nil {
-            agentPoolProfile["max_count"] = int(*profile.MaxCount)
+            maxCount = int(*profile.MaxCount)
         }
 
+        enableAutoScaling := false
         if profile.EnableAutoScaling != nil {
-            agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling
+            enableAutoScaling = *profile.EnableAutoScaling
         }
 
-        agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones)
-
+        name := ""
         if profile.Name != nil {
-            agentPoolProfile["name"] = *profile.Name
+            name = *profile.Name
         }
 
+        vmSize := ""
         if profile.VMSize != "" {
-            agentPoolProfile["vm_size"] = string(profile.VMSize)
+            vmSize = string(profile.VMSize)
         }
 
+        osDiskSizeGb := 0
         if profile.OsDiskSizeGB != nil {
-            agentPoolProfile["os_disk_size_gb"] = int(*profile.OsDiskSizeGB)
+            osDiskSizeGb = int(*profile.OsDiskSizeGB)
         }
 
+        vnetSubnetId := ""
         if profile.VnetSubnetID != nil {
-            agentPoolProfile["vnet_subnet_id"] = *profile.VnetSubnetID
+            vnetSubnetId = *profile.VnetSubnetID
         }
 
+        osType := ""
         if profile.OsType != "" {
-            agentPoolProfile["os_type"] = string(profile.OsType)
+            osType = string(profile.OsType)
         }
 
+        orchestratorVersion := ""
         if profile.OrchestratorVersion != nil && *profile.OrchestratorVersion != "" {
-            agentPoolProfile["orchestrator_version"] = *profile.OrchestratorVersion
+            orchestratorVersion = *profile.OrchestratorVersion
         }
 
+        maxPods := 0
         if profile.MaxPods != nil {
-            agentPoolProfile["max_pods"] = int(*profile.MaxPods)
+            maxPods = int(*profile.MaxPods)
         }
 
+        nodeLabels := make(map[string]string)
         if profile.NodeLabels != nil {
-            agentPoolProfile["node_labels"] = profile.NodeLabels
+            for k, v := range profile.NodeLabels {
+                if v == nil {
+                    continue
+                }
+
+                nodeLabels[k] = *v
+            }
         }
 
+        nodeTaints := make([]string, 0)
         if profile.NodeTaints != nil {
-            agentPoolProfile["node_taints"] = *profile.NodeTaints
+            nodeTaints = *profile.NodeTaints
         }
 
+        enableNodePublicIP := false
         if profile.EnableNodePublicIP != nil {
-            agentPoolProfile["enable_node_public_ip"] = *profile.EnableNodePublicIP
+            enableNodePublicIP = *profile.EnableNodePublicIP
         }
 
-        if profile.Tags != nil {
-            agentPoolProfile["tags"] = tags.Flatten(profile.Tags)
-        }
-
-        if profile.UpgradeSettings != nil {
-            agentPoolProfile["upgrade_settings"] = flattenUpgradeSettings(profile.UpgradeSettings)
-        }
-
-        agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile)
+        agentPoolProfiles = append(agentPoolProfiles, map[string]interface{}{
+            "availability_zones":    utils.FlattenStringSlice(profile.AvailabilityZones),
+            "count":                 count,
+            "enable_auto_scaling":   enableAutoScaling,
+            "enable_node_public_ip": enableNodePublicIP,
+            "max_count":             maxCount,
+            "max_pods":              maxPods,
+            "min_count":             minCount,
+            "name":                  name,
+            "node_labels":           nodeLabels,
+            "node_taints":           nodeTaints,
+            "orchestrator_version":  orchestratorVersion,
+            "os_disk_size_gb":       osDiskSizeGb,
+            "os_type":               osType,
+            "tags":                  tags.Flatten(profile.Tags),
+            "type":                  agentType,
+            "upgrade_settings":      flattenUpgradeSettings(profile.UpgradeSettings),
+            "vm_size":               vmSize,
+            "vnet_subnet_id":        vnetSubnetId,
+        })
     }
 
     return agentPoolProfiles
@@ -140,7 +140,7 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource {
 
             "tags": tags.SchemaDataSource(),
 
-            "upgrade_settings": upgradeSettingsSchema(),
+            "upgrade_settings": upgradeSettingsForDataSourceSchema(),
 
             "vm_size": {
                 Type:     schema.TypeString,
@@ -278,7 +278,9 @@ func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interf
     }
     d.Set("spot_max_price", spotMaxPrice)
 
-    d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings))
+    if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil {
+        return fmt.Errorf("setting `upgrade_settings`: %+v", err)
+    }
 
     d.Set("vnet_subnet_id", props.VnetSubnetID)
     d.Set("vm_size", string(props.VMSize))
@@ -304,6 +304,7 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf
         Type:                   containerservice.VirtualMachineScaleSets,
         VMSize:                 containerservice.VMSizeTypes(vmSize),
         EnableEncryptionAtHost: utils.Bool(enableHostEncryption),
+        UpgradeSettings:        expandUpgradeSettings(d.Get("upgrade_settings").([]interface{})),
 
         // this must always be sent during creation, but is optional for auto-scaled clusters during update
         Count: utils.Int32(int32(count)),
@@ -367,10 +368,6 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf
         profile.VnetSubnetID = utils.String(vnetSubnetID)
     }
 
-    if upgradeSettingsRaw, ok := d.Get("upgrade_settings").([]interface{}); ok && len(upgradeSettingsRaw) > 0 {
-        profile.UpgradeSettings = expandUpgradeSettings(upgradeSettingsRaw)
-    }
-
     maxCount := d.Get("max_count").(int)
     minCount := d.Get("min_count").(int)
 
@@ -693,7 +690,9 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac
         d.Set("vnet_subnet_id", props.VnetSubnetID)
         d.Set("vm_size", string(props.VMSize))
 
-        d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings))
+        if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil {
+            return fmt.Errorf("setting `upgrade_settings`: %+v", err)
+        }
     }
 
     return tags.FlattenAndSet(d, resp.Tags)
@@ -730,33 +729,54 @@ func upgradeSettingsSchema() *schema.Schema {
             Schema: map[string]*schema.Schema{
                 "max_surge": {
                     Type:     schema.TypeString,
-                    Optional: true,
+                    Required: true,
                 },
             },
         },
     }
 }
 
+func upgradeSettingsForDataSourceSchema() *schema.Schema {
+    return &schema.Schema{
+        Type:     schema.TypeList,
+        Computed: true,
+        Elem: &schema.Resource{
+            Schema: map[string]*schema.Schema{
+                "max_surge": {
+                    Type:     schema.TypeString,
+                    Computed: true,
+                },
+            },
+        },
+    }
+}
+
 func expandUpgradeSettings(input []interface{}) *containerservice.AgentPoolUpgradeSettings {
+    setting := &containerservice.AgentPoolUpgradeSettings{}
     if len(input) == 0 {
-        return nil
+        return setting
     }
-    upgradeSettingInput := input[0].(map[string]interface{})
-    upgradeSetting := containerservice.AgentPoolUpgradeSettings{}
 
-    if maxSurgeRaw := upgradeSettingInput["max_surge"].(string); maxSurgeRaw != "" {
-        upgradeSetting.MaxSurge = utils.String(maxSurgeRaw)
+    v := input[0].(map[string]interface{})
+    if maxSurgeRaw := v["max_surge"].(string); maxSurgeRaw != "" {
+        setting.MaxSurge = utils.String(maxSurgeRaw)
     }
-    return &upgradeSetting
+    return setting
 }
 
 func flattenUpgradeSettings(input *containerservice.AgentPoolUpgradeSettings) []interface{} {
-    upgradeSettings := make([]interface{}, 0)
-
     if input == nil {
-        return upgradeSettings
+        return []interface{}{}
     }
 
-    nodePoolSetting := make(map[string]interface{})
-    nodePoolSetting["max_surge"] = *input.MaxSurge
-    return append(upgradeSettings, nodePoolSetting)
+    maxSurge := ""
+    if input.MaxSurge != nil {
+        maxSurge = *input.MaxSurge
+    }
+
+    return []interface{}{
+        map[string]interface{}{
+            "max_surge": maxSurge,
+        },
+    }
 }
@@ -608,13 +608,31 @@ func testAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) {
 
     data.ResourceTest(t, r, []resource.TestStep{
         {
-            Config: r.upgradeSettingsConfig(data),
+            Config: r.upgradeSettingsConfig(data, "2"),
             Check: resource.ComposeTestCheckFunc(
                 check.That(data.ResourceName).ExistsInAzure(r),
                 check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
                 check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"),
             ),
         },
         data.ImportStep(),
+        {
+            Config: r.upgradeSettingsConfig(data, "4"),
+            Check: resource.ComposeTestCheckFunc(
+                check.That(data.ResourceName).ExistsInAzure(r),
+                check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"),
+                check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"),
+            ),
+        },
+        data.ImportStep(),
+        {
+            Config: r.upgradeSettingsConfig(data, ""),
+            Check: resource.ComposeTestCheckFunc(
+                check.That(data.ResourceName).ExistsInAzure(r),
+                check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("0"),
+            ),
+        },
+        data.ImportStep(),
     })
 }

@@ -1371,7 +1389,14 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
 `, r.templateConfig(data))
 }
 
-func (r KubernetesClusterNodePoolResource) upgradeSettingsConfig(data acceptance.TestData) string {
+func (r KubernetesClusterNodePoolResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string {
+    template := r.templateConfig(data)
+    if maxSurge != "" {
+        maxSurge = fmt.Sprintf(`upgrade_settings {
+    max_surge = %q
+  }`, maxSurge)
+    }
+
     return fmt.Sprintf(`
 provider "azurerm" {
   features {}
@@ -1384,11 +1409,9 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
   vm_size               = "Standard_DS2_v2"
   node_count            = 3
-  upgrade_settings {
-    max_surge = "2"
-  }
+  %s
 }
-`, r.templateConfig(data))
+`, template, maxSurge)
 }
 
 func (r KubernetesClusterNodePoolResource) virtualNetworkAutomaticConfig(data acceptance.TestData) string {
@@ -140,3 +140,40 @@ resource "azurerm_kubernetes_cluster" "test" {
 }
 `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion)
 }
+
+func (r KubernetesClusterResource) upgradeSettingsConfig(data acceptance.TestData, maxSurge string) string {
+    if maxSurge != "" {
+        maxSurge = fmt.Sprintf(`upgrade_settings {
+    max_surge = %q
+  }`, maxSurge)
+    }
+
+    return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-aks-%d"
+  location = "%s"
+}
+
+resource "azurerm_kubernetes_cluster" "test" {
+  name                = "acctestaks%d"
+  location            = azurerm_resource_group.test.location
+  resource_group_name = azurerm_resource_group.test.name
+  dns_prefix          = "acctestaks%d"
+
+  default_node_pool {
+    name       = "default"
+    node_count = 1
+    vm_size    = "Standard_DS2_v2"
+    %s
+  }
+
+  identity {
+    type = "SystemAssigned"
+  }
+}
+`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, maxSurge)
+}
