From f5bbad69daa9ea265d4968393439710e8a57c04c Mon Sep 17 00:00:00 2001
From: Andrei Titerlea <113932106+atiterlea@users.noreply.github.com>
Date: Fri, 4 Aug 2023 08:41:21 -0700
Subject: [PATCH] Adding support for Azure monitor... along with a few simple
 bug fixes (#41)

* Adding support for Azure monitor... along with a few simple bug fixes
* terraform-docs: automated action
* Accidentally deleted a deployment
* terraform-docs: automated action
* Adding live support for additional node pools (Azure)
* terraform-docs: automated action
* Fix for ACI connector
* Small changes to enable container insights
* terraform-docs: automated action
* Forgot public clusters

---------

Co-authored-by: github-actions[bot]
---
 infra/helm/helmfile.yaml                      |   6 +-
 .../{test => linux-private}/terragrunt.hcl    |   8 +-
 .../aws/{prod => linux-public}/terragrunt.hcl |  18 +-
 infra/live/azure/hybrid-public/terragrunt.hcl |  82 +++++++++
 .../terragrunt.hcl                            |   1 +
 .../{private => linux-private}/terragrunt.hcl |   1 +
 .../{public => linux-public}/terragrunt.hcl   |   1 +
 infra/modules/aws/eks/main.tf                 |   2 +-
 infra/modules/azure/aks/README.md             |   8 +-
 infra/modules/azure/aks/main.tf               | 166 ++++++++++++++++--
 infra/modules/azure/aks/variables.tf          |  17 +-
 infra/src/aws/README.md                       |   1 +
 infra/src/aws/main.tf                         |   2 +-
 infra/src/aws/variables.tf                    |   4 +
 infra/src/azure/README.md                     |   3 +
 infra/src/azure/main.tf                       |  31 +---
 infra/src/azure/variables.tf                  |  53 ++++++
 17 files changed, 339 insertions(+), 65 deletions(-)
 rename infra/live/aws/{test => linux-private}/terragrunt.hcl (87%)
 rename infra/live/aws/{prod => linux-public}/terragrunt.hcl (71%)
 create mode 100644 infra/live/azure/hybrid-public/terragrunt.hcl
 rename infra/live/azure/{lockdown-hub-and-spoke => kitchen-sink}/terragrunt.hcl (99%)
 rename infra/live/azure/{private => linux-private}/terragrunt.hcl (98%)
 rename infra/live/azure/{public => linux-public}/terragrunt.hcl (97%)

diff --git a/infra/helm/helmfile.yaml b/infra/helm/helmfile.yaml
index a1afae2..d82ff92 100644
--- a/infra/helm/helmfile.yaml
+++ b/infra/helm/helmfile.yaml
@@ -12,12 +12,14 @@ releases:
   - name: autoscaler
     namespace: kube-system
     chart: autoscaler/cluster-autoscaler
-    version: 9.26.0
+    version: 9.29.1
     set:
       - name: cloudProvider
         value: aws
+      - name: awsRegion
+        value: "us-east-1"
       - name: autoDiscovery.clusterName
-        value: "ghest"
+        value: "ghest-dev"

   - name: cert-manager
     namespace: cert-manager
diff --git a/infra/live/aws/test/terragrunt.hcl b/infra/live/aws/linux-private/terragrunt.hcl
similarity index 87%
rename from infra/live/aws/test/terragrunt.hcl
rename to infra/live/aws/linux-private/terragrunt.hcl
index d58a02c..a95167b 100644
--- a/infra/live/aws/test/terragrunt.hcl
+++ b/infra/live/aws/linux-private/terragrunt.hcl
@@ -5,7 +5,7 @@ terraform {
 remote_state {
   backend = "s3"
   config = {
-    bucket = "sk8s-tfstate-dev"
+    bucket = "sk8s-tfstate-private"
     key    = "terraform.tfstate"
     region = "us-east-1"
   }
@@ -20,8 +20,8 @@ inputs = {
   // The subnet range must generate at least twice the number of subnets as the number of availability zones specified.
   // So, for 3 AZs, we need 6 subnets (3 public + 3 private).
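// A quick worked example of the sizing rule above (illustrative aside, not a
// line from the patch): carving the /18 cidr_block below into /21 subnets
// yields 2^(21-18) = 8 subnets, comfortably more than the 6 needed for 3 AZs
// (3 public + 3 private). The previous /21 block with /24 subnets also yielded
// 8 subnets, just with far fewer addresses in each.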
- cidr_block = "172.27.0.0/21" - subnet_range = 24 + cidr_block = "172.27.0.0/18" + subnet_range = 21 availability_zones = [ "us-east-1a", @@ -31,6 +31,8 @@ inputs = { cluster_name = "ghest-dev" + private_cluster = true + instance_type = "t3.large" disk_size = 100 diff --git a/infra/live/aws/prod/terragrunt.hcl b/infra/live/aws/linux-public/terragrunt.hcl similarity index 71% rename from infra/live/aws/prod/terragrunt.hcl rename to infra/live/aws/linux-public/terragrunt.hcl index b6ab835..d719b29 100644 --- a/infra/live/aws/prod/terragrunt.hcl +++ b/infra/live/aws/linux-public/terragrunt.hcl @@ -1,11 +1,11 @@ terraform { - source = "../../../src/aws" + source = "../../..//src/aws" } remote_state { backend = "s3" config = { - bucket = "sk8s-tfstate-prod" + bucket = "sk8s-tfstate-public" key = "terraform.tfstate" region = "us-east-1" } @@ -16,7 +16,7 @@ remote_state { } inputs = { - network_name = "ghest-prod" + network_name = "ghest-dev" // The subnet range must generate at least twice the number of subnets as the number of availability zones specified. // So, for 3 AZs, we need 6 subnets (3 public + 3 private). @@ -29,14 +29,16 @@ inputs = { "us-east-1c" ] - cluster_name = "ghest-prod" + cluster_name = "ghest-dev" - instance_type = "m6i.2xlarge" - disk_size = 200 + private_cluster = false + + instance_type = "t3.large" + disk_size = 100 // The Project tag is required; we use it to generate unique IAM roles for the EKS cluster being created. tags = { - "Project" = "GHESTProd" - "Environment" = "Production" + "Project" = "GHESTDev" + "Environment" = "Development" } } diff --git a/infra/live/azure/hybrid-public/terragrunt.hcl b/infra/live/azure/hybrid-public/terragrunt.hcl new file mode 100644 index 0000000..f352545 --- /dev/null +++ b/infra/live/azure/hybrid-public/terragrunt.hcl @@ -0,0 +1,82 @@ +terraform { + source = "../../..//src/azure" +} + +remote_state { + backend = "azurerm" + config = { + resource_group_name = "sk8s" + storage_account_name = "sk8sinfrastate" + container_name = "tfstate" + key = "public.tfstate" + } + generate = { + path = "backend.tf" + if_exists = "overwrite_terragrunt" + } +} + +inputs = { + resource_group_name = "sk8s-cluster" + network_name = "sk8s-cluster-vnet" + address_space = "10.1.0.0/16" + private_cluster = false + system_managed_dns = false + subnets = [ + { + name = "cidr" + address_prefix = "10.1.64.0/18" + attributes = { + routing = "internal" + managed = true + services = [ "aks" ] + } + }, + { + name = "nodes" + address_prefix = "10.1.0.0/18" + attributes = { + routing = "external" + managed = false + services = [ "aks" ] + } + }, + { + name = "aci" + address_prefix = "10.1.128.0/18" + attributes = { + routing = "external" + managed = false + services = [ "aks" ] + } + }, + { + name = "extras" + address_prefix = "10.1.192.0/19" + attributes = { + routing = "internal" + managed = false + services = [ "acr" ] + } + } + ] + additional_node_pools = { + "win" = { + auto_scaler_profile = { + enabled = true + max_node_count = 3 + min_node_count = 1 + } + node_size = "Standard_D2s_v3" + node_os = "Windows" + priority = { + spot_enabled = false + } + } + } + tags = { + project = "Sk8s" + owner = "GitHub Practice" + } + container_insights_enabled = true +} diff --git a/infra/live/azure/lockdown-hub-and-spoke/terragrunt.hcl b/infra/live/azure/kitchen-sink/terragrunt.hcl similarity index 99% rename from infra/live/azure/lockdown-hub-and-spoke/terragrunt.hcl rename to infra/live/azure/kitchen-sink/terragrunt.hcl index 02ace2e..c0fdbfd 100644 --- 
a/infra/live/azure/lockdown-hub-and-spoke/terragrunt.hcl +++ b/infra/live/azure/kitchen-sink/terragrunt.hcl @@ -97,6 +97,7 @@ inputs = { project = "Sk8s" owner = "GitHub Practice" } + container_insights_enabled = true network_rules = [ { diff --git a/infra/live/azure/private/terragrunt.hcl b/infra/live/azure/linux-private/terragrunt.hcl similarity index 98% rename from infra/live/azure/private/terragrunt.hcl rename to infra/live/azure/linux-private/terragrunt.hcl index 8fba130..fae6dbc 100644 --- a/infra/live/azure/private/terragrunt.hcl +++ b/infra/live/azure/linux-private/terragrunt.hcl @@ -87,4 +87,5 @@ inputs = { project = "Sk8s" owner = "GitHub Practice" } + container_insights_enabled = true } diff --git a/infra/live/azure/public/terragrunt.hcl b/infra/live/azure/linux-public/terragrunt.hcl similarity index 97% rename from infra/live/azure/public/terragrunt.hcl rename to infra/live/azure/linux-public/terragrunt.hcl index 5c2ad3c..5c9350e 100644 --- a/infra/live/azure/public/terragrunt.hcl +++ b/infra/live/azure/linux-public/terragrunt.hcl @@ -64,4 +64,5 @@ inputs = { project = "Sk8s" owner = "GitHub Practice" } + container_insights_enabled = true } diff --git a/infra/modules/aws/eks/main.tf b/infra/modules/aws/eks/main.tf index 01dcb76..8e21a70 100644 --- a/infra/modules/aws/eks/main.tf +++ b/infra/modules/aws/eks/main.tf @@ -135,7 +135,7 @@ data "aws_iam_policy_document" "autoscaler" { condition { test = "StringEquals" variable = "${replace(aws_iam_openid_connect_provider.self.url, "https://", "")}:sub" - values = ["system:serviceaccount:kube-system:cluster-autoscaler"] + values = ["system:serviceaccount:kube-system:autoscaler-aws-cluster-autoscaler"] } principals { diff --git a/infra/modules/azure/aks/README.md b/infra/modules/azure/aks/README.md index d41284b..d1bb6a2 100644 --- a/infra/modules/azure/aks/README.md +++ b/infra/modules/azure/aks/README.md @@ -22,6 +22,9 @@ No modules. |------|------| | [azurerm_kubernetes_cluster.self](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | | [azurerm_kubernetes_cluster_node_pool.self](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_log_analytics_solution.self](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | +| [azurerm_log_analytics_workspace.self](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | +| [azurerm_monitor_diagnostic_setting.self](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_diagnostic_setting) | resource | | [azurerm_role_assignment.aci-custom-route](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | | [azurerm_role_assignment.aci-default-route](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | | [azurerm_client_config.self](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | @@ -31,11 +34,12 @@ No modules. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [additional\_node\_pools](#input\_additional\_node\_pools) | n/a |
<pre>map(object({<br>  auto_scaler_profile = object({<br>    enabled = bool<br>    max_node_count = optional(number, 3)<br>    min_node_count = optional(number, 1)<br>  })<br>  node_count = optional(number, 3)<br>  node_size = optional(string, "Standard_D2s_v3")<br>  node_os = optional(string, "Linux")<br>  priority = object({<br>    spot_enabled = bool<br>    spot_price = optional(number, -1)<br>  })<br>  zones = optional(list(string), ["1", "2", "3"])<br>}))</pre>
| `{}` | no | | [cluster\_name](#input\_cluster\_name) | Name of Azure Container Registry. | `string` | n/a | yes | -| [default\_node\_pool](#input\_default\_node\_pool) | n/a |
<pre>object({<br>  auto_scaler_profile = object({<br>    enabled = bool<br>    expander = optional(string, "random")<br>    max_node_count = optional(number, 3)<br>    min_node_count = optional(number, 1)<br>  })<br>  node_count = optional(number, 3)<br>  node_size = string<br>  zones = optional(list(string))<br>})</pre>
| n/a | yes | +| [container\_insights\_enabled](#input\_container\_insights\_enabled) | n/a | `bool` | `false` | no | +| [default\_node\_pool](#input\_default\_node\_pool) | n/a |
<pre>object({<br>  auto_scaler_profile = object({<br>    enabled = bool<br>    expander = optional(string, "random")<br>    max_node_count = optional(number, 3)<br>    min_node_count = optional(number, 1)<br>  })<br>  node_count = optional(number, 3)<br>  node_size = optional(string, "Standard_D2s_v3")<br>  zones = optional(list(string), ["1", "2", "3"])<br>})</pre>
| n/a | yes | | [identity](#input\_identity) | n/a |
<pre>object({<br>  assignment = string<br>  id = optional(string)<br>})</pre>
| n/a | yes | | [network](#input\_network) | n/a |
<pre>object({<br>  virtual_network_name = string<br>  subnet_id = string<br>  user_defined_routing = optional(bool, false)<br>  dns_service_ip = string<br>  docker_bridge_cidr = string<br>  plugin = string<br>  pod_cidr = optional(string)<br>  service_cidr = string<br>})</pre>
| n/a | yes | -| [node\_pools](#input\_node\_pools) | n/a |
<pre>map(object({<br>  auto_scaler_profile = object({<br>    enabled = bool<br>    max_node_count = optional(number, 3)<br>    min_node_count = optional(number, 1)<br>  })<br>  node_count = optional(number, 3)<br>  node_size = string<br>  priority = object({<br>    spot_enabled = bool<br>    spot_price = optional(number, -1)<br>  })<br>  subnet_name = optional(string)<br>  zones = optional(list(string))<br>}))</pre>
| `{}` | no | | [private\_cluster](#input\_private\_cluster) | Determine whether aks cluster will be private or public | `bool` | `true` | no | | [private\_zone\_id](#input\_private\_zone\_id) | ID of private DNS zone for looking up container registry private endpoint. | `string` | n/a | yes | | [resource\_group\_name](#input\_resource\_group\_name) | Name of Azure resource group in which DNS zone resides. | `string` | n/a | yes | diff --git a/infra/modules/azure/aks/main.tf b/infra/modules/azure/aks/main.tf index 3944e27..2bc4ea9 100644 --- a/infra/modules/azure/aks/main.tf +++ b/infra/modules/azure/aks/main.tf @@ -23,7 +23,7 @@ resource "azurerm_kubernetes_cluster" "self" { node_count = var.default_node_pool.auto_scaler_profile.enabled ? null : var.default_node_pool.node_count max_count = var.default_node_pool.auto_scaler_profile.enabled ? var.default_node_pool.auto_scaler_profile.max_node_count : null zones = var.default_node_pool.zones - # enable_host_encryption = true <- not enabled at the subscription level + # enable_host_encryption = true <- this needs to be enabled at the subscription level first tags = var.tags } @@ -43,6 +43,14 @@ resource "azurerm_kubernetes_cluster" "self" { } } + dynamic "oms_agent" { + for_each = var.container_insights_enabled ? [1] : [] + + content { + log_analytics_workspace_id = azurerm_log_analytics_workspace.self.0.id + } + } + identity { type = var.identity.assignment identity_ids = var.identity.assignment == "SystemAssigned" ? null : [var.identity.id] @@ -59,22 +67,23 @@ resource "azurerm_kubernetes_cluster" "self" { } resource "azurerm_kubernetes_cluster_node_pool" "self" { - for_each = var.node_pools - - name = each.key - kubernetes_cluster_id = azurerm_kubernetes_cluster.self.id - vnet_subnet_id = var.network.subnet_id - vm_size = each.value.node_size - enable_auto_scaling = each.value.auto_scaler_profile.enabled - min_count = each.value.auto_scaler_profile.enabled ? each.value.auto_scaler_profile.min_node_count : null - node_count = each.value.auto_scaler_profile.enabled ? null : each.value.node_count - max_count = each.value.auto_scaler_profile.enabled ? each.value.auto_scaler_profile.max_node_count : null - zones = each.value.zones + for_each = var.additional_node_pools + + name = each.key + kubernetes_cluster_id = azurerm_kubernetes_cluster.self.id + vnet_subnet_id = var.network.subnet_id + vm_size = each.value.node_size + os_type = each.value.node_os + enable_auto_scaling = each.value.auto_scaler_profile.enabled + min_count = each.value.auto_scaler_profile.enabled ? each.value.auto_scaler_profile.min_node_count : null + node_count = each.value.auto_scaler_profile.enabled ? null : each.value.node_count + max_count = each.value.auto_scaler_profile.enabled ? each.value.auto_scaler_profile.max_node_count : null + zones = each.value.zones # enable_host_encryption = true <- not enabled at the subscription level - priority = each.value.priority.spot_enabled ? "Spot" : "Regular" - spot_max_price = each.value.priority.spot_enabled ? each.value.priority.spot_price : null - eviction_policy = each.value.priority.spot_enabled ? "Delete" : null - tags = var.tags + priority = each.value.priority.spot_enabled ? "Spot" : "Regular" + spot_max_price = each.value.priority.spot_enabled ? each.value.priority.spot_price : null + eviction_policy = each.value.priority.spot_enabled ? 
"Delete" : null + tags = var.tags lifecycle { ignore_changes = [ @@ -86,7 +95,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "self" { resource "azurerm_role_assignment" "aci-default-route" { count = var.virtual_nodes.enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.self.identity[0].principal_id + principal_id = azurerm_kubernetes_cluster.self.aci_connector_linux[0].connector_identity[0].object_id role_definition_name = "Network Contributor" scope = "/subscriptions/${data.azurerm_client_config.self.subscription_id}/resourceGroups/${data.azurerm_resource_group.self.name}/providers/Microsoft.Network/virtualNetworks/${var.network.virtual_network_name}/subnets/${var.virtual_nodes.subnet_name}" } @@ -98,3 +107,126 @@ resource "azurerm_role_assignment" "aci-custom-route" { role_definition_name = "Network Contributor" scope = "/subscriptions/${data.azurerm_client_config.self.subscription_id}/resourceGroups/${data.azurerm_resource_group.self.name}/providers/Microsoft.Network/virtualNetworks/${var.network.virtual_network_name}" } + +resource "azurerm_log_analytics_workspace" "self" { + count = var.container_insights_enabled ? 1 : 0 + + name = "${data.azurerm_resource_group.self.name}-logs" + resource_group_name = data.azurerm_resource_group.self.name + location = data.azurerm_resource_group.self.location + sku = "PerGB2018" + tags = var.tags +} + +resource "azurerm_log_analytics_solution" "self" { + count = var.container_insights_enabled ? 1 : 0 + + solution_name = "ContainerInsights" + location = azurerm_log_analytics_workspace.self.0.location + resource_group_name = data.azurerm_resource_group.self.name + workspace_resource_id = azurerm_log_analytics_workspace.self.0.id + workspace_name = azurerm_log_analytics_workspace.self.0.name + + plan { + publisher = "Microsoft" + product = "OMSGallery/ContainerInsights" + } + + tags = var.tags + + lifecycle { + ignore_changes = [ + tags + ] + } +} + +resource "azurerm_monitor_diagnostic_setting" "self" { + count = var.container_insights_enabled ? 
1 : 0 + + name = "AKS Control Plane Logging" + target_resource_id = azurerm_kubernetes_cluster.self.id + log_analytics_workspace_id = azurerm_log_analytics_workspace.self.0.id + + enabled_log { + category = "cloud-controller-manager" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "cluster-autoscaler" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "csi-azuredisk-controller" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "csi-azurefile-controller" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "csi-snapshot-controller" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "kube-apiserver" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "kube-controller-manager" + + retention_policy { + enabled = true + days = 7 + } + } + + enabled_log { + category = "kube-scheduler" + + retention_policy { + enabled = true + days = 7 + } + } + + metric { + category = "AllMetrics" + enabled = false + + retention_policy { + enabled = false + days = 0 + } + } +} diff --git a/infra/modules/azure/aks/variables.tf b/infra/modules/azure/aks/variables.tf index ec75654..f943b62 100644 --- a/infra/modules/azure/aks/variables.tf +++ b/infra/modules/azure/aks/variables.tf @@ -51,12 +51,12 @@ variable "default_node_pool" { min_node_count = optional(number, 1) }) node_count = optional(number, 3) - node_size = string - zones = optional(list(string)) + node_size = optional(string, "Standard_D2s_v3") + zones = optional(list(string), ["1", "2", "3"]) }) } -variable "node_pools" { +variable "additional_node_pools" { type = map(object({ auto_scaler_profile = object({ enabled = bool @@ -64,13 +64,13 @@ variable "node_pools" { min_node_count = optional(number, 1) }) node_count = optional(number, 3) - node_size = string + node_size = optional(string, "Standard_D2s_v3") + node_os = optional(string, "Linux") priority = object({ spot_enabled = bool spot_price = optional(number, -1) }) - subnet_name = optional(string) - zones = optional(list(string)) + zones = optional(list(string), ["1", "2", "3"]) })) default = {} } @@ -92,6 +92,11 @@ variable "identity" { }) } +variable "container_insights_enabled" { + type = bool + default = false +} + variable "tags" { type = map(string) default = {} diff --git a/infra/src/aws/README.md b/infra/src/aws/README.md index f291447..b1115d2 100644 --- a/infra/src/aws/README.md +++ b/infra/src/aws/README.md @@ -28,6 +28,7 @@ No resources. 
| [disk\_size](#input\_disk\_size) | n/a | `number` | n/a | yes | | [instance\_type](#input\_instance\_type) | n/a | `string` | n/a | yes | | [network\_name](#input\_network\_name) | n/a | `string` | n/a | yes | +| [private\_cluster](#input\_private\_cluster) | n/a | `bool` | n/a | yes | | [subnet\_range](#input\_subnet\_range) | n/a | `number` | n/a | yes | | [tags](#input\_tags) | n/a | `map(string)` | n/a | yes | diff --git a/infra/src/aws/main.tf b/infra/src/aws/main.tf index 4f7535e..2e1f374 100644 --- a/infra/src/aws/main.tf +++ b/infra/src/aws/main.tf @@ -13,7 +13,7 @@ module "eks" { source = "../../modules/aws/eks" cluster_name = var.cluster_name - is_private = true + is_private = var.private_cluster instance_type = var.instance_type disk_size = var.disk_size public_subnets = module.network.public_subnets diff --git a/infra/src/aws/variables.tf b/infra/src/aws/variables.tf index cfe295a..edbabdb 100644 --- a/infra/src/aws/variables.tf +++ b/infra/src/aws/variables.tf @@ -18,6 +18,10 @@ variable "cluster_name" { type = string } +variable "private_cluster" { + type = bool +} + variable "instance_type" { type = string } diff --git a/infra/src/azure/README.md b/infra/src/azure/README.md index 3cc8253..387fffc 100644 --- a/infra/src/azure/README.md +++ b/infra/src/azure/README.md @@ -29,8 +29,11 @@ No resources. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [additional\_node\_pools](#input\_additional\_node\_pools) | Additional node pools to create |
<pre>map(object({<br>  auto_scaler_profile = object({<br>    enabled = bool<br>    max_node_count = optional(number, 3)<br>    min_node_count = optional(number, 1)<br>  })<br>  node_count = optional(number, 3)<br>  node_size = optional(string, "Standard_D2s_v3")<br>  node_os = optional(string, "Linux")<br>  priority = object({<br>    spot_enabled = bool<br>    spot_price = optional(number, -1)<br>  })<br>  zones = optional(list(string), ["1", "2", "3"])<br>}))</pre>
| `{}` | no | | [address\_space](#input\_address\_space) | n/a | `string` | n/a | yes | | [application\_rules](#input\_application\_rules) | List of application rules to be passed into the firewall policy |
<pre>list(object({<br>  name = string<br>  source_addresses = list(string)<br>  destination_fqdns = list(string)<br>  protocols = object({<br>    port = string<br>    type = string<br>  })<br>}))</pre>
| `null` | no | +| [container\_insights\_enabled](#input\_container\_insights\_enabled) | Determine whether container insights will be enabled for the cluster | `bool` | `false` | no | +| [default\_node\_pool](#input\_default\_node\_pool) | Default node pool configuration |
<pre>object({<br>  auto_scaler_profile = object({<br>    enabled = bool<br>    expander = optional(string, "random")<br>    max_node_count = optional(number, 3)<br>    min_node_count = optional(number, 1)<br>  })<br>  node_count = optional(number, 3)<br>  node_size = string<br>  zones = optional(list(string))<br>})</pre> | <pre>{<br>  "auto_scaler_profile": {<br>    "enabled": true,<br>    "max_node_count": 9,<br>    "min_node_count": 3<br>  },<br>  "node_size": "Standard_D2s_v3",<br>  "zones": [<br>    "1",<br>    "2",<br>    "3"<br>  ]<br>}</pre>
| no | | [firewall](#input\_firewall) | Firewall to use for outbound traffic. |
<pre>object({<br>  name = string<br>  resource_group = string<br>})</pre>
| `null` | no | | [network\_name](#input\_network\_name) | Name of virtual network. | `string` | n/a | yes | | [network\_rules](#input\_network\_rules) | List of network rules to be passed into the firewall policy |
<pre>list(object({<br>  name = string<br>  protocols = list(string)<br>  source_addresses = list(string)<br>  destination_addresses = list(string)<br>  destination_ports = list(string)<br>}))</pre>
| `null` | no | diff --git a/infra/src/azure/main.tf b/infra/src/azure/main.tf index 1fe7fba..3507361 100644 --- a/infra/src/azure/main.tf +++ b/infra/src/azure/main.tf @@ -91,6 +91,8 @@ module "aks" { private_cluster = var.private_cluster private_zone_id = module.dns.zone_id == null ? "System" : module.dns.zone_id + container_insights_enabled = var.container_insights_enabled + network = { virtual_network_name = module.network.virtual_network_name subnet_id = var.firewall == null ? module.network.subnets["nodes"].id : module.firewall.route_table_id @@ -102,31 +104,8 @@ module "aks" { service_cidr = local.managed_subnets[0].address_prefix } - default_node_pool = { - auto_scaler_profile = { - enabled = true - max_node_count = 9 - min_node_count = 3 - } - node_size = "Standard_D2s_v3" - zones = ["1", "2", "3"] - } - - # node_pools = { - # spot = { - # auto_scaler_profile = { - # enabled = true - # max_node_count = 3 - # min_node_count = 1 - # } - # node_size = "Standard_D2s_v3" - # zones = ["1", "2", "3"] - # priority = { - # spot_enabled = true - # spot_price = -1 - # } - # } - # } + default_node_pool = var.default_node_pool + additional_node_pools = var.additional_node_pools identity = { assignment = "SystemAssigned" @@ -136,4 +115,6 @@ module "aks" { enabled = true subnet_name = "aci" } + + tags = var.tags } diff --git a/infra/src/azure/variables.tf b/infra/src/azure/variables.tf index 95e9e81..2e5117c 100644 --- a/infra/src/azure/variables.tf +++ b/infra/src/azure/variables.tf @@ -19,6 +19,59 @@ variable "private_cluster" { default = true } +variable "default_node_pool" { + type = object({ + auto_scaler_profile = object({ + enabled = bool + expander = optional(string, "random") + max_node_count = optional(number, 3) + min_node_count = optional(number, 1) + }) + node_count = optional(number, 3) + node_size = string + zones = optional(list(string)) + }) + description = "Default node pool configuration" + + default = { + auto_scaler_profile = { + enabled = true + max_node_count = 9 + min_node_count = 3 + } + node_size = "Standard_D2s_v3" + zones = ["1", "2", "3"] + } +} + +variable "additional_node_pools" { + type = map(object({ + auto_scaler_profile = object({ + enabled = bool + max_node_count = optional(number, 3) + min_node_count = optional(number, 1) + }) + node_count = optional(number, 3) + node_size = optional(string, "Standard_D2s_v3") + node_os = optional(string, "Linux") + priority = object({ + spot_enabled = bool + spot_price = optional(number, -1) + }) + zones = optional(list(string), ["1", "2", "3"]) + })) + description = "Additional node pools to create" + + default = {} +} + +variable "container_insights_enabled" { + type = bool + description = "Determine whether container insights will be enabled for the cluster" + + default = false +} + variable "system_managed_dns"{ type = bool description = "Determine if dns zone is managed by system"
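As a usage note, the new inputs introduced by this patch are consumed from the live Terragrunt configurations in the same way the hybrid-public example above does. The sketch below is illustrative only — the "spot" pool name and the values are placeholders — and simply shows the shape now expected by infra/src/azure/variables.tf: container_insights_enabled turns on the oms_agent add-on together with the Log Analytics workspace, ContainerInsights solution and control-plane diagnostic settings, while each entry of additional_node_pools is created as an extra azurerm_kubernetes_cluster_node_pool, with Spot priority and a Delete eviction policy whenever spot_enabled is true.

inputs = {
  // Enables the oms_agent add-on plus the Log Analytics workspace,
  // ContainerInsights solution and AKS control-plane diagnostic settings.
  container_insights_enabled = true

  // Each map entry becomes an azurerm_kubernetes_cluster_node_pool;
  // the "spot" name and the values below are placeholders.
  additional_node_pools = {
    "spot" = {
      auto_scaler_profile = {
        enabled        = true
        max_node_count = 3
        min_node_count = 1
      }
      node_size = "Standard_D2s_v3"
      node_os   = "Linux"
      priority = {
        spot_enabled = true // rendered as priority = "Spot", eviction_policy = "Delete"
        spot_price   = -1   // -1 means pay up to the current on-demand price
      }
    }
  }
}

On the AWS side the analogous addition is the private_cluster input, which now feeds the eks module's is_private flag instead of the previously hard-coded true.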