-
Notifications
You must be signed in to change notification settings - Fork 1.1k
/
clusteringress.go
225 lines (198 loc) · 8.19 KB
/
clusteringress.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusteringress
import (
"context"
"reflect"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/cache"
"github.com/knative/pkg/apis/istio/v1alpha3"
istioinformers "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3"
istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3"
"github.com/knative/pkg/controller"
"github.com/knative/pkg/logging"
"github.com/knative/serving/pkg/apis/networking"
"github.com/knative/serving/pkg/apis/networking/v1alpha1"
informers "github.com/knative/serving/pkg/client/informers/externalversions/networking/v1alpha1"
listers "github.com/knative/serving/pkg/client/listers/networking/v1alpha1"
"github.com/knative/serving/pkg/reconciler"
"github.com/knative/serving/pkg/reconciler/v1alpha1/clusteringress/resources"
"github.com/knative/serving/pkg/reconciler/v1alpha1/clusteringress/resources/names"
)
// controllerAgentName is the agent name this controller uses when creating
// events and identifying itself to the shared reconciler machinery.
const controllerAgentName = "clusteringress-controller"
// Reconciler implements controller.Reconciler for ClusterIngress resources.
type Reconciler struct {
	// Base provides shared clients (ServingClientSet, SharedClientSet),
	// the Logger, and the event Recorder used throughout this reconciler.
	*reconciler.Base

	// listers index properties about resources
	// clusterIngressLister reads ClusterIngresses from the informer cache.
	clusterIngressLister listers.ClusterIngressLister
	// virtualServiceLister reads Istio VirtualServices from the informer cache.
	virtualServiceLister istiolisters.VirtualServiceLister
}

// Check that our Reconciler implements controller.Reconciler
var _ controller.Reconciler = (*Reconciler)(nil)
// NewController initializes the controller and is called by the generated code
// Registers eventhandlers to enqueue events.
func NewController(
	opt reconciler.Options,
	clusterIngressInformer informers.ClusterIngressInformer,
	virtualServiceInformer istioinformers.VirtualServiceInformer,
) *controller.Impl {
	// Build the reconciler around the shared base and the two listers.
	r := &Reconciler{
		Base:                 reconciler.NewBase(opt, controllerAgentName),
		clusterIngressLister: clusterIngressInformer.Lister(),
		virtualServiceLister: virtualServiceInformer.Lister(),
	}
	impl := controller.NewImpl(r, r.Logger, "ClusterIngresses", reconciler.MustNewStatsReporter("ClusterIngress", r.Logger))

	r.Logger.Info("Setting up event handlers")

	// Every ClusterIngress change (add/update/delete) is enqueued directly.
	ingressHandlers := cache.ResourceEventHandlerFuncs{
		AddFunc:    impl.Enqueue,
		UpdateFunc: controller.PassNew(impl.Enqueue),
		DeleteFunc: impl.Enqueue,
	}
	clusterIngressInformer.Informer().AddEventHandler(ingressHandlers)

	// VirtualService changes are mapped back to their owning ClusterIngress.
	enqueueOwner := r.enqueueOwnerIngress(impl)
	virtualServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueueOwner,
		UpdateFunc: controller.PassNew(enqueueOwner),
	})

	return impl
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the ClusterIngress resource
// with the current status of the resource.
//
// A nil return for an unparseable key or a vanished resource deliberately
// stops the workqueue from retrying; real reconcile/update errors propagate
// so the key is requeued.
func (c *Reconciler) Reconcile(ctx context.Context, key string) error {
	// Convert the namespace/name string into a distinct namespace and name.
	// ClusterIngress is cluster-scoped, so only the name portion is used.
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		c.Logger.Errorf("invalid resource key: %s", key)
		return nil
	}
	logger := logging.FromContext(ctx)

	// Get the ClusterIngress resource with this name.
	original, err := c.clusterIngressLister.Get(name)
	if apierrs.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing.
		logger.Errorf("clusteringress %q in work queue no longer exists", key)
		return nil
	} else if err != nil {
		return err
	}
	// Don't modify the informers copy
	ci := original.DeepCopy()

	// Reconcile this copy of the ClusterIngress and then write back any status
	// updates regardless of whether the reconciliation errored out.
	err = c.reconcile(ctx, ci)
	if equality.Semantic.DeepEqual(original.Status, ci.Status) {
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the informer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	} else if _, err := c.updateStatus(ctx, ci); err != nil {
		// Warnw (not Warn): the sugared logger logs zap fields structurally
		// only through the *w variants; plain Warn would stringify the field
		// as a stray operand.
		logger.Warnw("Failed to update clusterIngress status", zap.Error(err))
		c.Recorder.Eventf(ci, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update status for ClusterIngress %q: %v", ci.Name, err)
		return err
	}
	// Surface the reconcile error (if any) so the key is retried.
	return err
}
// Update the Status of the ClusterIngress. Caller is responsible for checking
// for semantic differences before calling.
func (c *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.ClusterIngress) (*v1alpha1.ClusterIngress, error) {
	current, err := c.clusterIngressLister.Get(desired.Name)
	if err != nil {
		return nil, err
	}
	// Skip the API round-trip when the stored status already matches.
	if reflect.DeepEqual(current.Status, desired.Status) {
		return current, nil
	}

	// Work on a copy so the informer's cached object stays untouched.
	toUpdate := current.DeepCopy()
	toUpdate.Status = desired.Status
	// TODO: for CRD there's no updatestatus, so use normal update.
	updated, err := c.ServingClientSet.NetworkingV1alpha1().ClusterIngresses().Update(toUpdate)
	if err != nil {
		return nil, err
	}
	c.Recorder.Eventf(desired, corev1.EventTypeNormal, "Updated", "Updated status for clusterIngress %q", desired.Name)
	return updated, nil
}
// reconcile converges the given ClusterIngress toward its desired state by
// materializing it as an Istio VirtualService, then marks the ingress status
// accordingly. The ci argument is mutated in place (status only).
func (c *Reconciler) reconcile(ctx context.Context, ci *v1alpha1.ClusterIngress) error {
	logger := logging.FromContext(ctx)
	ci.Status.InitializeConditions()
	vs := resources.MakeVirtualService(ci)

	// Fixed log message formatting (was "clusterIngress :%v").
	logger.Infof("Reconciling clusterIngress: %v", ci)
	logger.Info("Creating/Updating VirtualService")
	if err := c.reconcileVirtualService(ctx, ci, vs); err != nil {
		// TODO(lichuqiang): should we explicitly mark the ingress as unready
		// when error reconciling VirtualService?
		return err
	}
	// As underlying network programming (VirtualService now) is stateless,
	// here we simply mark the ingress as ready if the VirtualService
	// is successfully synced.
	ci.Status.MarkNetworkConfigured()
	ci.Status.MarkLoadBalancerReady([]v1alpha1.LoadBalancerIngressStatus{
		{DomainInternal: names.K8sGatewayServiceFullname},
	})
	logger.Info("ClusterIngress successfully synced")
	return nil
}
// reconcileVirtualService ensures the Istio VirtualService backing the given
// ClusterIngress matches the desired state: it creates the VirtualService if
// absent, updates its spec if it drifted, and records events on the owning
// ClusterIngress either way. Returns any client error for requeueing.
func (c *Reconciler) reconcileVirtualService(ctx context.Context, ci *v1alpha1.ClusterIngress,
	desired *v1alpha3.VirtualService) error {
	logger := logging.FromContext(ctx)
	ns := desired.Namespace
	name := desired.Name

	vs, err := c.virtualServiceLister.VirtualServices(ns).Get(name)
	if apierrs.IsNotFound(err) {
		// Discard the created object: it was never read afterwards
		// (the original assigned it to vs ineffectually).
		if _, err := c.SharedClientSet.NetworkingV1alpha3().VirtualServices(ns).Create(desired); err != nil {
			// Errorw (not Error): sugared logger needs the *w variant to log
			// the zap field structurally.
			logger.Errorw("Failed to create VirtualService", zap.Error(err))
			c.Recorder.Eventf(ci, corev1.EventTypeWarning, "CreationFailed",
				"Failed to create VirtualService %q/%q: %v", ns, name, err)
			return err
		}
		c.Recorder.Eventf(ci, corev1.EventTypeNormal, "Created",
			"Created VirtualService %q", desired.Name)
	} else if err != nil {
		return err
	} else if !equality.Semantic.DeepEqual(vs.Spec, desired.Spec) {
		// Don't modify the informers copy
		existing := vs.DeepCopy()
		existing.Spec = desired.Spec
		_, err = c.SharedClientSet.NetworkingV1alpha3().VirtualServices(ns).Update(existing)
		if err != nil {
			logger.Errorw("Failed to update VirtualService", zap.Error(err))
			return err
		}
		// Record on the owning ClusterIngress (consistent with the create
		// branch) and describe what actually changed — the spec, not status.
		c.Recorder.Eventf(ci, corev1.EventTypeNormal, "Updated",
			"Updated VirtualService %q/%q", ns, name)
	}
	return nil
}
// enqueueOwnerIngress builds an informer event handler that maps a
// VirtualService back to the ClusterIngress referring to it (via the
// networking ingress label) and enqueues that ingress for reconciliation.
// Objects that are not VirtualServices, or carry no ingress label, are
// logged and dropped.
func (c *Reconciler) enqueueOwnerIngress(impl *controller.Impl) func(obj interface{}) {
	return func(obj interface{}) {
		service, ok := obj.(*v1alpha3.VirtualService)
		if !ok {
			c.Logger.Infof("Ignoring non-VirtualService objects %v", obj)
			return
		}
		// Check whether the VirtualService is referred by a ClusterIngress.
		owner, labeled := service.Labels[networking.IngressLabelKey]
		if !labeled {
			c.Logger.Infof("VirtualService %s/%s does not have a referring ingress", service.Namespace, service.Name)
			return
		}
		impl.EnqueueKey(owner)
	}
}