forked from tigera/operator
/
component.go
142 lines (129 loc) · 4.68 KB
/
component.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
package utils
import (
"context"
"reflect"
"github.com/go-logr/logr"
"github.com/tigera/operator/pkg/controller/status"
"github.com/tigera/operator/pkg/render"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// ComponentHandler reconciles the Kubernetes objects that make up a rendered
// component against the cluster.
type ComponentHandler interface {
	// CreateOrUpdate creates or updates each object in the given component.
	// If the StatusManager is non-nil, DaemonSets and Deployments encountered
	// are registered with it for status reporting.
	CreateOrUpdate(context.Context, render.Component, *status.StatusManager) error
}
// NewComponentHandler returns a ComponentHandler that reconciles component
// objects with the given client, using scheme to set owner references back
// to the custom resource cr, and logging through log.
func NewComponentHandler(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object) ComponentHandler {
	handler := componentHandler{
		log:    log,
		cr:     cr,
		scheme: scheme,
		client: client,
	}
	return &handler
}
// componentHandler is the default ComponentHandler implementation.
type componentHandler struct {
	client client.Client   // API client used for Get/Create/Update calls.
	scheme *runtime.Scheme // Scheme used when setting controller references.
	cr     metav1.Object   // Custom resource that owns the created objects.
	log    logr.Logger     // Base logger; per-resource context is added per object.
}
// CreateOrUpdate reconciles every object that comprises the given component:
// objects that do not yet exist are created, and existing ones are updated
// via mergeState (unless annotated as ignored). DaemonSets and Deployments
// seen along the way are registered with the StatusManager, if one is given.
// Returns on the first error encountered; a not-ready component is skipped
// without error.
func (c componentHandler) CreateOrUpdate(ctx context.Context, component render.Component, status *status.StatusManager) error {
	// Before creating the component, make sure that it is ready. This provides a hook to do
	// dependency checking for the component.
	cmpLog := c.log.WithValues("component", reflect.TypeOf(component))
	cmpLog.V(2).Info("Checking if component is ready")
	if !component.Ready() {
		cmpLog.Info("Component is not ready, skipping")
		return nil
	}
	cmpLog.V(2).Info("Reconciling")

	// Iterate through each object that comprises the component and attempt to create it,
	// or update it if needed.
	daemonSets := []types.NamespacedName{}
	deployments := []types.NamespacedName{}
	for _, obj := range component.Objects() {
		// Set CR instance as the owner and controller so garbage collection
		// removes these objects when the CR is deleted.
		if err := controllerutil.SetControllerReference(c.cr, obj.(metav1.ObjectMetaAccessor).GetObjectMeta(), c.scheme); err != nil {
			return err
		}

		logCtx := ContextLoggerForResource(c.log, obj)
		old := obj.DeepCopyObject()
		key, err := client.ObjectKeyFromObject(obj)
		if err != nil {
			return err
		}

		// Keep track of some objects so we can report on their status.
		switch obj.GetObjectKind().GroupVersionKind().Kind {
		case "DaemonSet":
			daemonSets = append(daemonSets, key)
		case "Deployment":
			deployments = append(deployments, key)
		}

		// Check to see if the object exists or not.
		err = c.client.Get(ctx, key, old)
		if err != nil {
			if !apierrors.IsNotFound(err) {
				// Anything other than "Not found" we should retry.
				return err
			}
			// Otherwise, if it was not found, we should create it and move on.
			logCtx.V(2).Info("Object does not exist, creating it", "error", err)
			if err := c.client.Create(ctx, obj); err != nil {
				return err
			}
			continue
		}

		// The object exists. Update it, unless the user has marked it as "ignored".
		if IgnoreObject(old) {
			logCtx.Info("Ignoring annotated object")
			continue
		}
		logCtx.V(1).Info("Resource already exists, update it")
		if err := c.client.Update(ctx, mergeState(obj, old)); err != nil {
			logCtx.WithValues("key", key).Info("Failed to update object.")
			return err
		}
	}

	if status != nil {
		status.SetDaemonsets(daemonSets)
		status.SetDeployments(deployments)
	}
	cmpLog.Info("Done reconciling component")
	return nil
}
// mergeState returns the object to pass to Update given the current and desired object states.
//
// In every case the current object's resourceVersion is copied onto the
// desired state so the Update targets the revision that was just read;
// certain kinds additionally preserve server-populated, immutable fields.
func mergeState(desired, current runtime.Object) runtime.Object {
	// Common to all cases: carry the current resourceVersion over to desired.
	oldRV := current.(metav1.ObjectMetaAccessor).GetObjectMeta().GetResourceVersion()
	desired.(metav1.ObjectMetaAccessor).GetObjectMeta().SetResourceVersion(oldRV)

	switch d := desired.(type) {
	case *v1.Service:
		// Services are a special case since some fields (namely ClusterIP) are defaulted
		// and we need to maintain them on updates.
		d.Spec.ClusterIP = current.(*v1.Service).Spec.ClusterIP
		return d
	case *batchv1.Job:
		// Jobs have controller-uid values added to spec.selector and spec.template.metadata.labels.
		// spec.selector and podtemplatespec are immutable so just copy real values over to desired state.
		cj := current.(*batchv1.Job)
		d.Spec.Selector = cj.Spec.Selector
		d.Spec.Template = cj.Spec.Template
		return d
	default:
		// Default to just using the desired state, with an updated RV.
		return desired
	}
}