diff --git a/kube.tf.example b/kube.tf.example
index 372988cc..859f4cee 100644
--- a/kube.tf.example
+++ b/kube.tf.example
@@ -60,10 +60,19 @@ module "kube-hetzner" {
   # If you must change the network CIDR you can do so below, but it is highly advised against.
   # network_ipv4_cidr = "10.0.0.0/8"
 
+  # Using the default configuration you can only create a maximum of 42 agent nodepools.
+  # This is due to the creation of a subnet for each nodepool, with CIDRs being `10.[nodepool-index].0.0/16`, which collides with k3s' cluster and service IP ranges (defaults below).
+  # Furthermore, the maximum number of nodepools (control plane and agent) is 50, due to a hard limit of 50 subnets per network (https://docs.hetzner.com/cloud/networks/faq/#:~:text=You%20can%20create%20up%20to%2050%20subnets.)
+  # So to be able to create a maximum of 50 nodepools in total, the values below have to be changed to something outside that range, e.g. `10.200.0.0/16` and `10.201.0.0/16` for the cluster and service CIDRs respectively.
+
   # If you must change the cluster CIDR you can do so below, but it is highly advised against.
   # Cluster CIDR must be a part of the network CIDR!
   # cluster_ipv4_cidr = "10.42.0.0/16"
 
+  # If you must change the service CIDR you can do so below, but it is highly advised against.
+  # Service CIDR must be a part of the network CIDR!
+  # service_ipv4_cidr = "10.43.0.0/16"
+
   # For the control planes, at least three nodes are the minimum for HA. Otherwise, you need to turn off the automatic upgrades (see README).
   # **It must always be an ODD number, never even!** Search the internet for "splitbrain problem with etcd" or see https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/
   # For instance, one is ok (non-HA), two is not ok, and three is ok (becomes HA). It does not matter if they are in the same nodepool or not! So they can be in different locations and of various types.
@@ -76,7 +85,8 @@ module "kube-hetzner" {
 
   # You can also rename it (if the count is 0), but do not remove a nodepool from the list.
   # The only nodepools that are safe to remove from the list are at the end. That is due to how subnets and IPs get allocated (FILO).
-  # You can, however, freely add other nodepools at the end of each list if you want. The maximum number of nodepools you can create combined for both lists is 255.
+  # You can, however, freely add other nodepools at the end of each list if you want. The theoretical maximum number of nodepools you can create combined for both lists is 255.
+  # But due to Hetzner's limit of 50 subnets per network, the realistic limit is 50 (see cluster_ipv4_cidr above).
   # Also, before decreasing the count of any nodepools to 0, it's essential to drain and cordon the nodes in question. Otherwise, it will leave your cluster in a bad state.
   # Before initializing the cluster, you can change all parameters and add or remove any nodepools. You need at least one nodepool of each kind, control plane, and agent.
 
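For reference, a minimal sketch of what the override described in the comments above would look like in a user's kube.tf. The CIDR values are the ones suggested in the comments (both still fall inside the default 10.0.0.0/8 network CIDR); everything else in the module block is elided:

```hcl
module "kube-hetzner" {
  # ... provider, token, nodepool definitions, etc. ...

  # Move the k3s cluster and service CIDRs out of the 10.[0-49].0.0/16
  # range occupied by nodepool subnets, so all 50 subnets stay usable.
  cluster_ipv4_cidr = "10.200.0.0/16"
  service_ipv4_cidr = "10.201.0.0/16"
}
```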
diff --git a/locals.tf b/locals.tf
index 77b44517..24546e3d 100644
--- a/locals.tf
+++ b/locals.tf
@@ -66,7 +66,7 @@ locals {
   apply_k3s_selinux = ["/sbin/semodule -v -i /usr/share/selinux/packages/k3s.pp"]
 
   install_k3s_server = concat(local.common_pre_install_k3s_commands, [
-    "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='server ${var.k3s_exec_server_args}' sh -"
+    "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='server ${var.k3s_exec_server_args} --cluster-cidr=${var.cluster_ipv4_cidr} --service-cidr=${var.service_ipv4_cidr}' sh -"
   ], local.apply_k3s_selinux)
   install_k3s_agent = concat(local.common_pre_install_k3s_commands, [
     "curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC='agent ${var.k3s_exec_agent_args}' sh -"
diff --git a/variables.tf b/variables.tf
index 44f6b955..a8ce2158 100644
--- a/variables.tf
+++ b/variables.tf
@@ -80,6 +80,12 @@ variable "cluster_ipv4_cidr" {
   default     = "10.42.0.0/16"
 }
 
+variable "service_ipv4_cidr" {
+  description = "Internal Service CIDR, used for the controller and currently for calico."
+  type        = string
+  default     = "10.43.0.0/16"
+}
+
 variable "load_balancer_location" {
   description = "Default load balancer location."
   type        = string
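A possible hardening not included in this diff: since service_ipv4_cidr is interpolated straight into the k3s install command, a validation block would make a malformed CIDR fail at plan time instead of at provisioning time. This is a hypothetical sketch, not part of the change:

```hcl
variable "service_ipv4_cidr" {
  description = "Internal Service CIDR, used for the controller and currently for calico."
  type        = string
  default     = "10.43.0.0/16"

  validation {
    # cidrhost() fails on anything that is not a valid CIDR prefix, so
    # can() turns it into a plan-time validity check. (Hypothetical addition.)
    condition     = can(cidrhost(var.service_ipv4_cidr, 0))
    error_message = "The service_ipv4_cidr value must be a valid IPv4 CIDR (e.g. 10.43.0.0/16)."
  }
}
```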