From 78fa5480b0aa51f9d52bbc48ba83a735e3266e34 Mon Sep 17 00:00:00 2001 From: thomasprade <71489568+thomasprade@users.noreply.github.com> Date: Mon, 24 Jul 2023 13:36:48 +0200 Subject: [PATCH 1/2] Add cluster-cidr argument to k3s start command Changes on the `cluster_ipv4_cidr` variable in the kube.tf file were not completely reflected in the cluster config and the default pod ip range `10.42.0.0/16` conflicted with the creation of more than 41 agent nodepools. --- locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locals.tf b/locals.tf index 8abd9c3f..da24b788 100644 --- a/locals.tf +++ b/locals.tf @@ -66,7 +66,7 @@ locals { apply_k3s_selinux = ["/sbin/semodule -v -i /usr/share/selinux/packages/k3s.pp"] install_k3s_server = concat(local.common_pre_install_k3s_commands, [ - "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='server ${var.k3s_exec_server_args}' sh -" + "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='server ${var.k3s_exec_server_args} --cluster-cidr=${var.cluster_ipv4_cidr}' sh -" ], local.apply_k3s_selinux) install_k3s_agent = concat(local.common_pre_install_k3s_commands, [ "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='agent ${var.k3s_exec_agent_args}' sh -" From ef491688c2b5b5bc237f0ff5a73cecadd9e9e938 Mon Sep 17 00:00:00 2001 From: thomasprade <71489568+thomasprade@users.noreply.github.com> Date: Mon, 24 Jul 2023 14:17:39 +0200 Subject: [PATCH 2/2] Add variable for service-cidr and extend documentation. 
--- kube.tf.example | 12 +++++++++++- locals.tf | 2 +- variables.tf | 6 ++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/kube.tf.example b/kube.tf.example index c8c40e3b..16dfe7b9 100644 --- a/kube.tf.example +++ b/kube.tf.example @@ -60,10 +60,19 @@ module "kube-hetzner" { # If you must change the network CIDR you can do so below, but it is highly advised against. # network_ipv4_cidr = "10.0.0.0/8" + # Using the default configuration you can only create a maximum of 42 agent-nodepools. + # This is due to the creation of a subnet for each nodepool with cidrs being `10.[nodepool-index].0.0/16` which collides with k3s' cluster and service IP ranges (defaults below). + # Furthermore the maximum number of nodepools (controlplane and agent) is 50, due to a hard limit of 50 subnets per network (https://docs.hetzner.com/cloud/networks/faq/#:~:text=You%20can%20create%20up%20to%2050%20subnets.) + # So to be able to create a maximum of 50 nodepools in total, the values below have to be changed to something outside that range, e.g. `10.200.0.0/16` and `10.201.0.0/16` for cluster and service respectively. + # If you must change the cluster CIDR you can do so below, but it is highly advised against. # Cluster CIDR must be a part of the network CIDR! # cluster_ipv4_cidr = "10.42.0.0/16" + # If you must change the service CIDR you can do so below, but it is highly advised against. + # Service CIDR must be a part of the network CIDR! + # service_ipv4_cidr = "10.43.0.0/16" + # For the control planes, at least three nodes are the minimum for HA. Otherwise, you need to turn off the automatic upgrades (see README). # **It must always be an ODD number, never even!** Search the internet for "splitbrain problem with etcd" or see https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/ # For instance, one is ok (non-HA), two is not ok, and three is ok (becomes HA). It does not matter if they are in the same nodepool or not! 
So they can be in different locations and of various types. @@ -76,7 +85,8 @@ module "kube-hetzner" { # You can also rename it (if the count is 0), but do not remove a nodepool from the list. # The only nodepools that are safe to remove from the list are at the end. That is due to how subnets and IPs get allocated (FILO). - # You can, however, freely add other nodepools at the end of each list if you want. The maximum number of nodepools you can create combined for both lists is 255. + # You can, however, freely add other nodepools at the end of each list if you want. The theoretical maximum number of nodepools you can create combined for both lists is 255. + # But due to a limitation of 50 subnets per network by Hetzner, the realistic limit is 50 (see ipv4_cidr above). # Also, before decreasing the count of any nodepools to 0, it's essential to drain and cordon the nodes in question. Otherwise, it will leave your cluster in a bad state. # Before initializing the cluster, you can change all parameters and add or remove any nodepools. You need at least one nodepool of each kind, control plane, and agent. 
diff --git a/locals.tf b/locals.tf index da24b788..1b47cb0f 100644 --- a/locals.tf +++ b/locals.tf @@ -66,7 +66,7 @@ locals { apply_k3s_selinux = ["/sbin/semodule -v -i /usr/share/selinux/packages/k3s.pp"] install_k3s_server = concat(local.common_pre_install_k3s_commands, [ - "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='server ${var.k3s_exec_server_args} --cluster-cidr=${var.cluster_ipv4_cidr}' sh -" + "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='server ${var.k3s_exec_server_args} --cluster-cidr=${var.cluster_ipv4_cidr} --service-cidr=${var.service_ipv4_cidr}' sh -" ], local.apply_k3s_selinux) install_k3s_agent = concat(local.common_pre_install_k3s_commands, [ "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='agent ${var.k3s_exec_agent_args}' sh -" diff --git a/variables.tf b/variables.tf index fa137739..0c95ffbb 100644 --- a/variables.tf +++ b/variables.tf @@ -80,6 +80,12 @@ variable "cluster_ipv4_cidr" { default = "10.42.0.0/16" } +variable "service_ipv4_cidr" { + description = "Internal Service CIDR, used for the controller and currently for calico." + type = string + default = "10.43.0.0/16" +} + variable "load_balancer_location" { description = "Default load balancer location." type = string