-
Notifications
You must be signed in to change notification settings - Fork 156
/
cluster.go
260 lines (219 loc) · 9.39 KB
/
cluster.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
/*
Copyright 2020 The Kubermatic Kubernetes Platform contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaulting
import (
"context"
"fmt"
"github.com/imdario/mergo"
"go.uber.org/zap"
kubermaticv1 "k8c.io/kubermatic/v2/pkg/apis/kubermatic/v1"
kubermaticv1helper "k8c.io/kubermatic/v2/pkg/apis/kubermatic/v1/helper"
"k8c.io/kubermatic/v2/pkg/cni"
"k8c.io/kubermatic/v2/pkg/provider"
"k8c.io/kubermatic/v2/pkg/resources"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/ptr"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// DefaultClusterSpec defaults the cluster spec when creating a new cluster.
// Defaults are taken from, in order:
//  1. ClusterTemplate (if given)
//  2. Seed's spec.componentsOverrides
//  3. KubermaticConfiguration's spec.userCluster
//  4. Constants in pkg/controller/operator/defaults
//
// This function assumes that the KubermaticConfiguration has already been defaulted
// (as the KubermaticConfigurationGetter does that automatically), but the Seed
// does not yet need to be defaulted (to the values of the KubermaticConfiguration).
func DefaultClusterSpec(ctx context.Context, spec *kubermaticv1.ClusterSpec, template *kubermaticv1.ClusterTemplate, seed *kubermaticv1.Seed, config *kubermaticv1.KubermaticConfiguration, cloudProvider provider.CloudProvider) error {
	var err error

	// Apply default values to the Seed, just in case.
	if config != nil {
		seed, err = DefaultSeed(seed, config, zap.NewNop().Sugar())
		if err != nil {
			return fmt.Errorf("failed to apply default values to Seed: %w", err)
		}
	}

	// If a ClusterTemplate was configured for the Seed, the caller
	// retrieved it for us already and we can use it as the primary
	// source for defaults.
	if template != nil {
		if err := mergo.Merge(spec, template.Spec); err != nil {
			return fmt.Errorf("failed to apply defaulting template to Cluster spec: %w", err)
		}
	}

	// Checking and applying each field of the ComponentSettings is tedious,
	// so we reuse mergo as well. Even though DefaultComponentSettings is
	// deprecated, we cannot remove its handling here, as the template can
	// be unconfigured (i.e. nil).
	if err := mergo.Merge(&spec.ComponentsOverride, seed.Spec.DefaultComponentSettings); err != nil {
		// NOTE: this merges the Seed's default component settings, not the
		// defaulting template; the error message must say so to be debuggable.
		return fmt.Errorf("failed to apply seed's default component settings to Cluster spec: %w", err)
	}

	// Give cloud providers a chance to default their spec.
	if cloudProvider != nil {
		if err := cloudProvider.DefaultCloudSpec(ctx, spec); err != nil {
			return fmt.Errorf("failed to default cloud spec: %w", err)
		}
	}

	// set expose strategy
	if spec.ExposeStrategy == "" {
		spec.ExposeStrategy = seed.Spec.ExposeStrategy
	}

	// Though the caller probably had already determined the datacenter
	// to construct the cloud provider instance, we do not take the DC
	// as a parameter, to keep this function's signature at least somewhat
	// short. But to enforce certain settings, we still need to have the DC.
	datacenter, fieldErr := DatacenterForClusterSpec(spec, seed)
	if fieldErr != nil {
		return fieldErr
	}

	// Enforce audit logging
	if datacenter.Spec.EnforceAuditLogging {
		spec.AuditLogging = &kubermaticv1.AuditLoggingSettings{
			Enabled: true,
		}
	}

	// Enforce PodSecurityPolicy
	if datacenter.Spec.EnforcePodSecurityPolicy {
		spec.UsePodSecurityPolicyAdmissionPlugin = true
	}

	// Ensure provider name matches the given spec
	providerName, err := kubermaticv1helper.ClusterCloudProviderName(spec.Cloud)
	if err != nil {
		return fmt.Errorf("failed to determine cloud provider: %w", err)
	}
	spec.Cloud.ProviderName = providerName

	// Kubernetes dashboard is enabled by default.
	if spec.KubernetesDashboard == nil {
		spec.KubernetesDashboard = &kubermaticv1.KubernetesDashboard{
			Enabled: true,
		}
	}

	// OSM is enabled by default.
	if spec.EnableOperatingSystemManager == nil {
		spec.EnableOperatingSystemManager = ptr.To(true)
	}

	// Add default CNI plugin settings if not present. Edge clusters default
	// to Canal, everything else to Cilium; a configured plugin with an empty
	// version gets the default version for its type.
	if spec.CNIPlugin == nil {
		if spec.Cloud.Edge != nil {
			spec.CNIPlugin = &kubermaticv1.CNIPluginSettings{
				Type:    kubermaticv1.CNIPluginTypeCanal,
				Version: cni.GetDefaultCNIPluginVersion(kubermaticv1.CNIPluginTypeCanal),
			}
		} else {
			spec.CNIPlugin = &kubermaticv1.CNIPluginSettings{
				Type:    kubermaticv1.CNIPluginTypeCilium,
				Version: cni.GetDefaultCNIPluginVersion(kubermaticv1.CNIPluginTypeCilium),
			}
		}
	} else if spec.CNIPlugin.Version == "" {
		spec.CNIPlugin.Version = cni.GetDefaultCNIPluginVersion(spec.CNIPlugin.Type)
	}

	// default cluster networking parameters
	spec.ClusterNetwork = DefaultClusterNetwork(spec.ClusterNetwork, kubermaticv1.ProviderType(spec.Cloud.ProviderName), spec.ExposeStrategy)

	// If KubeLB is enforced, enable it.
	if datacenter.Spec.KubeLB != nil && datacenter.Spec.KubeLB.Enforced {
		if spec.KubeLB == nil {
			spec.KubeLB = &kubermaticv1.KubeLB{
				Enabled: true,
			}
		} else {
			spec.KubeLB.Enabled = true
		}
	}

	return nil
}
// GetDefaultingClusterTemplate returns the ClusterTemplate that is referenced by the Seed.
// Note that this can return nil if no template is configured yet (this is not considered
// an error).
func GetDefaultingClusterTemplate(ctx context.Context, client ctrlruntimeclient.Reader, seed *kubermaticv1.Seed) (*kubermaticv1.ClusterTemplate, error) {
	templateName := seed.Spec.DefaultClusterTemplate
	if templateName == "" {
		// No default template configured for this Seed; not an error.
		return nil, nil
	}

	template := kubermaticv1.ClusterTemplate{}
	if err := client.Get(ctx, types.NamespacedName{Namespace: seed.Namespace, Name: templateName}, &template); err != nil {
		return nil, fmt.Errorf("failed to get ClusterTemplate: %w", err)
	}

	// Only templates explicitly scoped to seeds may be used as defaults.
	if scope := template.Labels["scope"]; scope != kubermaticv1.SeedTemplateScope {
		return nil, fmt.Errorf("invalid scope of default cluster template, is %q but must be %q", scope, kubermaticv1.SeedTemplateScope)
	}

	return &template, nil
}
// DatacenterForClusterSpec looks up the Datacenter referenced by the cluster's
// spec.cloud.dc field in the given Seed. It returns a field.Error (rooted at
// spec.cloud.dc) if the name is empty or not defined in the Seed.
func DatacenterForClusterSpec(spec *kubermaticv1.ClusterSpec, seed *kubermaticv1.Seed) (*kubermaticv1.Datacenter, *field.Error) {
	datacenterName := spec.Cloud.DatacenterName
	if datacenterName == "" {
		return nil, field.Required(field.NewPath("spec", "cloud", "dc"), "no datacenter name specified")
	}

	// Direct map lookup instead of iterating over all datacenters to compare keys.
	if datacenter, exists := seed.Spec.Datacenters[datacenterName]; exists {
		// datacenter is a local copy of the map value, so taking its address is safe.
		return &datacenter, nil
	}

	return nil, field.Invalid(field.NewPath("spec", "cloud", "dc"), datacenterName, "invalid datacenter name")
}
// DefaultClusterNetwork fills in defaults for every cluster networking parameter
// that was not set explicitly and returns the completed configuration. Defaults
// for CIDRs and the proxy mode can depend on the cloud provider; the tunneling
// agent IP is only defaulted when the Tunneling expose strategy is used.
func DefaultClusterNetwork(specClusterNetwork kubermaticv1.ClusterNetworkingConfig, provider kubermaticv1.ProviderType, exposeStrategy kubermaticv1.ExposeStrategy) kubermaticv1.ClusterNetworkingConfig {
	// Derive the IP family from the number of pod CIDRs when it is unset.
	if specClusterNetwork.IPFamily == "" {
		switch {
		case len(specClusterNetwork.Pods.CIDRBlocks) < 2:
			// single / no pods CIDR means IPv4-only (IPv6-only is not supported yet and not allowed by cluster validation)
			specClusterNetwork.IPFamily = kubermaticv1.IPFamilyIPv4
		default:
			// more than one pods CIDR means dual-stack (multiple IPv4 CIDRs are not allowed by cluster validation)
			specClusterNetwork.IPFamily = kubermaticv1.IPFamilyDualStack
		}
	}

	dualStack := specClusterNetwork.IPFamily == kubermaticv1.IPFamilyDualStack

	// Default pod CIDRs: IPv4 always, plus the IPv6 CIDR for dual-stack.
	if len(specClusterNetwork.Pods.CIDRBlocks) == 0 {
		podCIDRs := []string{resources.GetDefaultPodCIDRIPv4(provider)}
		if dualStack {
			podCIDRs = append(podCIDRs, resources.DefaultClusterPodsCIDRIPv6)
		}
		specClusterNetwork.Pods.CIDRBlocks = podCIDRs
	}

	// Default service CIDRs, analogous to the pod CIDRs above.
	if len(specClusterNetwork.Services.CIDRBlocks) == 0 {
		serviceCIDRs := []string{resources.GetDefaultServicesCIDRIPv4(provider)}
		if dualStack {
			serviceCIDRs = append(serviceCIDRs, resources.DefaultClusterServicesCIDRIPv6)
		}
		specClusterNetwork.Services.CIDRBlocks = serviceCIDRs
	}

	// Node CIDR mask sizes are only defaulted for address families actually in use.
	if specClusterNetwork.NodeCIDRMaskSizeIPv4 == nil && specClusterNetwork.Pods.HasIPv4CIDR() {
		specClusterNetwork.NodeCIDRMaskSizeIPv4 = ptr.To[int32](resources.DefaultNodeCIDRMaskSizeIPv4)
	}
	if specClusterNetwork.NodeCIDRMaskSizeIPv6 == nil && specClusterNetwork.Pods.HasIPv6CIDR() {
		specClusterNetwork.NodeCIDRMaskSizeIPv6 = ptr.To[int32](resources.DefaultNodeCIDRMaskSizeIPv6)
	}

	if specClusterNetwork.ProxyMode == "" {
		specClusterNetwork.ProxyMode = resources.GetDefaultProxyMode(provider)
	}

	// IPVS mode gets a configuration struct with strict ARP enabled by default.
	if specClusterNetwork.ProxyMode == resources.IPVSProxyMode {
		if specClusterNetwork.IPVS == nil {
			specClusterNetwork.IPVS = &kubermaticv1.IPVSConfiguration{}
		}
		if specClusterNetwork.IPVS.StrictArp == nil {
			specClusterNetwork.IPVS.StrictArp = ptr.To(true)
		}
	}

	if specClusterNetwork.NodeLocalDNSCacheEnabled == nil {
		specClusterNetwork.NodeLocalDNSCacheEnabled = ptr.To(resources.DefaultNodeLocalDNSCacheEnabled)
	}

	if specClusterNetwork.DNSDomain == "" {
		specClusterNetwork.DNSDomain = "cluster.local"
	}

	// The tunneling agent IP is only relevant for the Tunneling expose strategy.
	if exposeStrategy == kubermaticv1.ExposeStrategyTunneling && specClusterNetwork.TunnelingAgentIP == "" {
		specClusterNetwork.TunnelingAgentIP = resources.DefaultTunnelingAgentIP
	}

	return specClusterNetwork
}