package k3s

import (
	"strings"

	"github.com/spigell/pulumi-hcloud-kube-hetzner/internal/system/variables"
	"github.com/spigell/pulumi-hcloud-kube-hetzner/internal/utils"
)

const (
	defaultServiceCIDR = "10.140.0.0/16"
	defaultClusterCIDR = "10.141.0.0/16"
)
// These are very opinionated values, based on my experience with k3s.
var (
	defaultKubeControllerManagerArgs = map[string]string{
		// Increase the grace period for failed nodes.
		// With this value the cluster takes longer to declare a node failed.
		// K3s is mostly used in small environments with very tight amounts of resources,
		// so it is better to wait a bit longer for a node to come back than to lose it.
		"node-monitor-grace-period": "2m",
	}
	defaultsKubeCloudControllerManagerArgs = map[string]string{
		// https://github.com/k3s-io/k3s/discussions/6452#discussioncomment-4080240
		// It can conflict with the Hetzner CCM.
		"secure-port": "0",
	}
	defaultKubeAPIServerArgs = map[string]string{
		// If a node is down, there is no need to wait more than 60s.
		"default-not-ready-toleration-seconds":   "60",
		"default-unreachable-toleration-seconds": "60",
	}
	defaultsKubeletArgs = map[string]map[string]string{
		variables.ServerRole: {
			// Reporting status every 5s is too frequent for small clusters.
			"node-status-update-frequency": "20s",
			// Make sure the server reserves enough resources for the k3s binary itself.
			"system-reserved": "cpu=1,memory=1Gi",
		},
		variables.AgentRole: {
			"node-status-update-frequency": "20s",
			// An agent consumes fewer resources than a server.
			"system-reserved": "cpu=100m,memory=100Mi",
		},
	}
	DefaultTaints = map[string][]string{
		variables.ServerRole: {
			// These taints prevent regular pods from being scheduled on server nodes.
			// Used when agent nodes exist.
			"CriticalAddonsOnly=true:NoExecute",
			"node-role.kubernetes.io/control-plane:NoSchedule",
		},
	}
)
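
// WithServerDefaults applies the opinionated server defaults to the
// configuration, filling in CIDRs and component arguments only where the
// user has not already set them.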
func (k *K3sConfig) WithServerDefaults() *K3sConfig {
	k.WriteKubeconfigMode = "0644"
	// Disable the network policy controller for now. It is not needed for
	// small clusters and it doesn't work with CCM + HA.
	// TODO: fix it.
	k.DisableNetworkPolicy = true
	k.TLSSanSecurity = true

	if k.ClusterCidr == "" {
		k.ClusterCidr = defaultClusterCIDR
	}

	if k.ServiceCidr == "" {
		k.ServiceCidr = defaultServiceCIDR
	}

	for _, key := range utils.SortedMapKeys(defaultKubeControllerManagerArgs) {
		value := defaultKubeControllerManagerArgs[key]
		if !containsKey(k.KubeControllerManagerArgs, key) {
			k.KubeControllerManagerArgs = append(k.KubeControllerManagerArgs,
				strings.Join([]string{key, value}, "="),
			)
		}
	}

	for _, key := range utils.SortedMapKeys(defaultsKubeCloudControllerManagerArgs) {
		value := defaultsKubeCloudControllerManagerArgs[key]
		if !containsKey(k.KubeCloudControllerManagerArgs, key) {
			k.KubeCloudControllerManagerArgs = append(k.KubeCloudControllerManagerArgs,
				strings.Join([]string{key, value}, "="),
			)
		}
	}

	for _, key := range utils.SortedMapKeys(defaultKubeAPIServerArgs) {
		value := defaultKubeAPIServerArgs[key]
		if !containsKey(k.KubeAPIServerArgs, key) {
			k.KubeAPIServerArgs = append(k.KubeAPIServerArgs,
				strings.Join([]string{key, value}, "="),
			)
		}
	}

	for _, key := range utils.SortedMapKeys(defaultsKubeletArgs[variables.ServerRole]) {
		value := defaultsKubeletArgs[variables.ServerRole][key]
		if !containsKey(k.KubeletArgs, key) {
			k.KubeletArgs = append(k.KubeletArgs,
				strings.Join([]string{key, value}, "="),
			)
		}
	}

	return k
}

// containsKey reports whether a slice of "key=value" strings already
// contains an entry with the given key.
func containsKey(slice []string, key string) bool {
	for _, s := range slice {
		if strings.Split(s, "=")[0] == key {
			return true
		}
	}
	return false
}
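
// Example usage (a minimal sketch; the config values below are illustrative
// and not part of this package's defaults):
//
//	cfg := &K3sConfig{
//		KubeletArgs: []string{"system-reserved=cpu=2,memory=2Gi"},
//	}
//	cfg = cfg.WithServerDefaults()
//	// The user-supplied system-reserved entry is kept, while the remaining
//	// defaults (e.g. node-status-update-frequency=20s) are appended.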