deletion.go
/*
Copyright 2020 The Kubermatic Kubernetes Platform contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clusterdeletion

import (
	"context"
	"fmt"

	"go.uber.org/zap"

	kubermaticapiv1 "k8c.io/kubermatic/v2/pkg/api/v1"
	kubermaticv1 "k8c.io/kubermatic/v2/pkg/crd/kubermatic/v1"
	kuberneteshelper "k8c.io/kubermatic/v2/pkg/kubernetes"

	controllerruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
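
// deletedLBAnnotationName is the annotation under which the cluster records
// the LoadBalancers that have already been cleaned up.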
const (
	deletedLBAnnotationName = "kubermatic.io/cleaned-up-loadbalancers"
)
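
// New returns a Deletion that uses the given seed cluster client and a getter
// that yields a client for the user cluster being deleted.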
func New(seedClient controllerruntimeclient.Client, userClusterClientGetter func() (controllerruntimeclient.Client, error)) *Deletion {
	return &Deletion{
		seedClient:              seedClient,
		userClusterClientGetter: userClusterClientGetter,
	}
}
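
// Deletion cleans up clusters that are being deleted: in-cluster resources
// first, then nodes, then the cloud provider credentials.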
type Deletion struct {
	seedClient              controllerruntimeclient.Client
	userClusterClientGetter func() (controllerruntimeclient.Client, error)
}
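
// A minimal usage sketch (illustrative only; the reconcile wiring shown here
// is an assumption, not part of this package): a cluster controller would
// construct Deletion once and call CleanupCluster on every reconciliation of
// a cluster whose deletion timestamp is set, requeueing while finalizers
// remain:
//
//	d := clusterdeletion.New(seedClient, userClusterClientGetter)
//	if cluster.DeletionTimestamp != nil {
//		if err := d.CleanupCluster(ctx, log, cluster); err != nil {
//			return reconcile.Result{}, err
//		}
//	}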

// CleanupCluster is responsible for cleaning up a cluster.
func (d *Deletion) CleanupCluster(ctx context.Context, log *zap.SugaredLogger, cluster *kubermaticv1.Cluster) error {
	log = log.Named("cleanup")

	// Delete Volumes and LBs inside the user cluster.
	if err := d.cleanupInClusterResources(ctx, log, cluster); err != nil {
		return err
	}

	// If the in-cluster cleanup has not finished yet, we must return early: if we
	// deleted the nodes while controllers are still running inside the cluster,
	// we would get stuck.
	if kuberneteshelper.HasAnyFinalizer(cluster,
		kubermaticapiv1.InClusterLBCleanupFinalizer,
		kubermaticapiv1.InClusterPVCleanupFinalizer,
		kubermaticapiv1.InClusterCredentialsRequestsCleanupFinalizer,
		kubermaticapiv1.InClusterImageRegistryConfigCleanupFinalizer) {
		return nil
	}

	if err := d.cleanupNodes(ctx, cluster); err != nil {
		return err
	}

	// As long as nodes are left, we must not clean up other infrastructure at the cloud provider.
	if kuberneteshelper.HasFinalizer(cluster, kubermaticapiv1.NodeDeletionFinalizer) {
		return nil
	}

	// We might need credentials for the cloud provider cleanup. Since different cloud
	// providers use different finalizers, we must ensure that the credentials are not
	// removed until the cloud provider is cleaned up, or in other words, until all
	// other finalizers have been removed from the cluster and the
	// CredentialsSecretsCleanupFinalizer is the only one left.
	if kuberneteshelper.HasOnlyFinalizer(cluster, kubermaticapiv1.CredentialsSecretsCleanupFinalizer) {
		if err := d.cleanUpCredentialsSecrets(ctx, cluster); err != nil {
			return err
		}
	}

	return nil
}
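
// For reference, the kuberneteshelper finalizer predicates used above behave
// roughly as follows (a sketch, not the actual implementation; it assumes the
// finalizers are stored on the object's metadata as usual):
//
//	func hasAnyFinalizer(o metav1.Object, names ...string) bool {
//		return sets.NewString(o.GetFinalizers()...).HasAny(names...)
//	}
//
//	func hasOnlyFinalizer(o metav1.Object, name string) bool {
//		return sets.NewString(o.GetFinalizers()...).Equal(sets.NewString(name))
//	}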

// cleanupInClusterResources deletes LoadBalancers, PersistentVolumes,
// CredentialsRequests and ImageRegistryConfigs inside the user cluster,
// gated by the corresponding finalizers.
func (d *Deletion) cleanupInClusterResources(ctx context.Context, log *zap.SugaredLogger, cluster *kubermaticv1.Cluster) error {
	log = log.Named("in-cluster-resources")

	shouldDeleteLBs := kuberneteshelper.HasFinalizer(cluster, kubermaticapiv1.InClusterLBCleanupFinalizer)
	shouldDeletePVs := kuberneteshelper.HasFinalizer(cluster, kubermaticapiv1.InClusterPVCleanupFinalizer)
	shouldDeleteCredentialsRequests := kuberneteshelper.HasFinalizer(cluster, kubermaticapiv1.InClusterCredentialsRequestsCleanupFinalizer)
	shouldDeleteImageRegistryConfigs := kuberneteshelper.HasFinalizer(cluster, kubermaticapiv1.InClusterImageRegistryConfigCleanupFinalizer)

	// If no relevant finalizer exists, return directly.
	if !shouldDeleteLBs && !shouldDeletePVs && !shouldDeleteCredentialsRequests && !shouldDeleteImageRegistryConfigs {
		log.Debug("Skipping in-cluster-resources deletion. None of the in-cluster cleanup finalizers is set.")
		return nil
	}

	// We set this to true whenever we deleted something, so that we keep requeueing
	// until all resources are really gone. A single flag is used for LBs and PVs alike,
	// so the Kubernetes controller-manager can clean up all resources in parallel.
	var deletedSomeResource bool

	if shouldDeleteLBs {
		deletedSomeLBs, err := d.cleanupLBs(ctx, log, cluster)
		if err != nil {
			return fmt.Errorf("failed to cleanup LBs: %v", err)
		}

		deletedSomeResource = deletedSomeResource || deletedSomeLBs
	}

	if shouldDeletePVs {
		deletedSomeVolumes, err := d.cleanupVolumes(ctx, cluster)
		if err != nil {
			return fmt.Errorf("failed to cleanup PVs: %v", err)
		}

		deletedSomeResource = deletedSomeResource || deletedSomeVolumes
	}

	if shouldDeleteImageRegistryConfigs {
		deletedSomeImageRegistryConfigs, err := d.cleanupImageRegistryConfigs(ctx, log, cluster)
		if err != nil {
			return fmt.Errorf("failed to cleanup ImageRegistryConfigs: %v", err)
		}

		// Prevent the credentials from being invalidated before the cleanup has finished.
		if deletedSomeImageRegistryConfigs {
			return nil
		}
	}

	// This must come after the ImageRegistryConfigs deletion, as the latter uses a
	// credential obtained via a CredentialsRequest.
	if shouldDeleteCredentialsRequests {
		deletedSomeCredentialsRequests, err := d.cleanupCredentialsRequests(ctx, log, cluster)
		if err != nil {
			return fmt.Errorf("failed to cleanup CredentialsRequests: %v", err)
		}

		deletedSomeResource = deletedSomeResource || deletedSomeCredentialsRequests
	}

	// If we deleted something, it is implied that there is still something left, so we
	// simply return here. The finalizers stay, and the cluster controller will requeue
	// us after a delay. This means we may issue multiple DELETE calls against the same
	// resource if the cleanup takes a while, but that should not cause any harm.
	// We also need to return when something was deleted so that the
	// checkIfAllLoadbalancersAreGone call below gets an updated version of the cluster
	// from the lister.
	if deletedSomeResource {
		return nil
	}

	lbsAreGone, err := d.checkIfAllLoadbalancersAreGone(ctx, cluster)
	if err != nil {
		return fmt.Errorf("failed to check if all LoadBalancers are gone: %v", err)
	}

	// Return so we check again later.
	if !lbsAreGone {
		return nil
	}

	oldCluster := cluster.DeepCopy()
	kuberneteshelper.RemoveFinalizer(cluster, kubermaticapiv1.InClusterLBCleanupFinalizer)
	kuberneteshelper.RemoveFinalizer(cluster, kubermaticapiv1.InClusterPVCleanupFinalizer)
	kuberneteshelper.RemoveFinalizer(cluster, kubermaticapiv1.InClusterCredentialsRequestsCleanupFinalizer)
	kuberneteshelper.RemoveFinalizer(cluster, kubermaticapiv1.InClusterImageRegistryConfigCleanupFinalizer)

	return d.seedClient.Patch(ctx, cluster, controllerruntimeclient.MergeFrom(oldCluster))
}
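
// Note on the finalizer removal above: controllerruntimeclient.MergeFrom
// produces a merge patch containing only the diff against oldCluster, so the
// Patch call touches just the finalizers and leaves concurrent changes to
// unrelated fields of the Cluster object intact.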