generated from giantswarm/template-operator
/
instances.go
122 lines (106 loc) · 3.82 KB
/
instances.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
package etcdbackup
import (
"context"
"fmt"
"github.com/dlclark/regexp2"
"github.com/giantswarm/apiextensions-backup/api/v1alpha1"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/v7/pkg/controller/context/reconciliationcanceledcontext"
"github.com/giantswarm/etcd-backup-operator/v3/pkg/giantnetes"
"github.com/giantswarm/etcd-backup-operator/v3/service/controller/key"
)
// runBackupOnAllInstances resolves the set of ETCD instances selected by the
// ETCDBackup custom object and invokes handler on each of them in turn.
//
// The instance list is built one of two ways:
//   - Spec.ClusterNames non-empty: each listed name is matched against the
//     management cluster and the known workload clusters; a name that
//     resolves to no cluster gets an error recorded in the CR status and is
//     skipped.
//   - otherwise: the management cluster (unless backup of it is disabled on
//     the resource), plus, when Spec.GuestBackup is set, every workload
//     cluster whose name matches Spec.ClustersRegex.
//
// handler reports whether it changed the per-instance status. On the first
// change the status is persisted, reconciliation is canceled and (true, nil)
// is returned so the controller re-enters with fresh state. (false, nil)
// means no instance status changed, i.e. the backup run is complete.
func (r *Resource) runBackupOnAllInstances(ctx context.Context, obj interface{}, handler func(context.Context, giantnetes.ETCDInstance, *v1alpha1.ETCDInstanceBackupStatusIndex) bool) (bool, error) {
	customObject, err := key.ToCustomObject(obj)
	if err != nil {
		return false, microerror.Mask(err)
	}

	utils, err := giantnetes.NewUtils(r.logger, r.k8sClient)
	if err != nil {
		return false, microerror.Mask(err)
	}

	// Writing to a nil map panics, so make sure the status map exists before
	// any per-instance status is recorded below.
	if len(customObject.Status.Instances) == 0 {
		customObject.Status.Instances = make(map[string]v1alpha1.ETCDInstanceBackupStatusIndex)
	}

	var instances []giantnetes.ETCDInstance

	if len(customObject.Spec.ClusterNames) > 0 {
		r.logger.LogCtx(ctx, "level", "debug", "message", "CR contains explicit list of cluster names")

		// User specified a list of cluster IDs to be backed up.
		// Load workload clusters.
		guestInstances, err := utils.GetTenantClusters(ctx)
		if err != nil {
			return false, microerror.Mask(err)
		}

		for _, id := range customObject.Spec.ClusterNames {
			if id == key.ManagementCluster {
				instances = append(instances, giantnetes.ETCDInstance{
					Name:   key.ManagementCluster,
					ETCDv2: r.etcdV2Settings,
					ETCDv3: r.etcdV3Settings,
				},
				)
			} else {
				found := false
				for _, candidate := range guestInstances {
					if candidate.Name == id {
						instances = append(instances, candidate)
						found = true
						break
					}
				}
				if !found {
					// Record the failure in the CR status rather than
					// aborting, so the remaining clusters still get handled.
					r.logger.LogCtx(ctx, "level", "error", "message", fmt.Sprintf("cluster %q was not found", id))
					instanceStatus := r.findOrInitializeInstanceStatus(ctx, customObject, id)
					instanceStatus.Error = "No cluster found with such name"
					instanceStatus.V2 = nil
					instanceStatus.V3 = nil
					customObject.Status.Instances[id] = instanceStatus

					err = r.persistCustomObjectStatus(ctx, customObject)
					if err != nil {
						return false, microerror.Mask(err)
					}
				}
			}
		}
	} else {
		r.logger.LogCtx(ctx, "level", "debug", "message", "CR does not contain explicit list of cluster names")

		// Control plane.
		if !r.skipManagementClusterBackup {
			cp := giantnetes.ETCDInstance{
				Name:   key.ManagementCluster,
				ETCDv2: r.etcdV2Settings,
				ETCDv3: r.etcdV3Settings,
			}
			instances = append(instances, cp)
		}

		if customObject.Spec.GuestBackup {
			// Tenant clusters.
			guestInstances, err := utils.GetTenantClusters(ctx)
			if err != nil {
				return false, microerror.Mask(err)
			}

			// ClustersRegex comes from the CR, i.e. user input: compile it
			// explicitly instead of MustCompile, which would panic the
			// operator on an invalid pattern.
			re, err := regexp2.Compile(customObject.Spec.ClustersRegex, 0)
			if err != nil {
				return false, microerror.Mask(err)
			}

			for _, guestInstance := range guestInstances {
				isMatch, err := re.MatchString(guestInstance.Name)
				if err != nil {
					// regexp2 match errors (e.g. timeout) must not be
					// silently dropped; surface them to the caller.
					return false, microerror.Mask(err)
				}
				if isMatch {
					instances = append(instances, guestInstance)
				}
			}
		}
	}

	for _, etcdInstance := range instances {
		instanceStatus := r.findOrInitializeInstanceStatus(ctx, customObject, etcdInstance.Name)

		doneSomething := handler(ctx, etcdInstance, &instanceStatus)
		if doneSomething {
			customObject.Status.Instances[etcdInstance.Name] = instanceStatus
			err = r.persistCustomObjectStatus(ctx, customObject)
			if err != nil {
				return false, microerror.Mask(err)
			}
			r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("set resource status for instance '%s'", etcdInstance.Name))

			// Cancel this reconciliation loop so the next one observes the
			// freshly persisted status; return true to signal progress.
			r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
			reconciliationcanceledcontext.SetCanceled(ctx)
			return true, nil
		}
	}

	// No status changes have happened within any of the instances, backup is completed.
	return false, nil
}