Update status updater name to IngressStatusUpdater, fix RBAC (#2386)
Updates #403.

Change the StatusLoadBalancerUpdater name to IngressStatusUpdater, which makes its purpose clearer.

Minor fix to RBAC: the "ingresses/status" resource requires the "update" verb for updating status.
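
As a minimal sketch, the extra verb corresponds to an RBAC rule of roughly this shape (trimmed for illustration; the rendered examples later in this commit grant a longer verb list on the same resource):

- apiGroups: ["networking.k8s.io"]
  resources: ["ingresses/status"]
  verbs:
  - get
  - patch
  - update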

Signed-off-by: Nick Young <ynick@vmware.com>
youngnick committed Mar 31, 2020
1 parent fc79b07 commit f1b2a4e
Showing 8 changed files with 53 additions and 40 deletions.
49 changes: 27 additions & 22 deletions cmd/contour/ingressstatus.go
@@ -1,4 +1,4 @@
// Copyright © 2019 VMware
// Copyright © 2020 VMware
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -21,73 +21,78 @@ import (
v1 "k8s.io/api/core/v1"
)

// ingressStatusWriter manages the lifetime of StatusLoadBalancerUpdaters.
// loadBalancerStatusWriter manages the lifetime of IngressStatusUpdaters.
//
// The theory of operation of the ingressStatusWriter is as follows:
// 1. On startup the ingressStatusWriter waits to be elected leader.
// 2. Once elected leader, the ingressStatusWriter waits to receive a
// The theory of operation of the loadBalancerStatusWriter is as follows:
// 1. On startup the loadBalancerStatusWriter waits to be elected leader.
// 2. Once elected leader, the loadBalancerStatusWriter waits to receive a
// v1.LoadBalancerStatus value.
// 3. Once a v1.LoadBalancerStatus value has been received, any existing informer
// is stopped and a new informer started in its place.
// 4. Each informer is connected to a k8s.StatusLoadBalancerUpdater which reacts to
// is stopped and a new informer started in its place. This ensures that all existing
// Ingress objects will have OnAdd events fired to the new event handler.
// 4. Each informer is connected to a k8s.IngressStatusUpdater which reacts to
// OnAdd events for networking.k8s.io/ingress.v1beta1 objects. For each OnAdd
// the object is patched with the v1.LoadBalancerStatus value obtained on creation.
// OnUpdate and OnDelete events are ignored. If a new v1.LoadBalancerStatus value
// is received, operation restarts at step 3.
// 5. If the worker is stopped, any existing informer is stopped before the worker stops.
type ingressStatusWriter struct {
type loadBalancerStatusWriter struct {
log logrus.FieldLogger
clients *k8s.Clients
isLeader chan struct{}
lbStatus chan v1.LoadBalancerStatus
}

func (isw *ingressStatusWriter) Start(stop <-chan struct{}) error {
func (isw *loadBalancerStatusWriter) Start(stop <-chan struct{}) error {

// await leadership election
// Await leadership election.
isw.log.Info("awaiting leadership election")
select {
case <-stop:
// asked to stop before elected leader
// We were asked to stop before elected leader.
return nil
case <-isw.isLeader:
isw.log.Info("elected leader")
}

var shutdown chan struct{}
var stopping sync.WaitGroup
var ingressInformers sync.WaitGroup
for {
select {
case <-stop:
// stop existing informer and shut down
// Use the shutdown channel to stop the existing informer and shut down.
if shutdown != nil {
close(shutdown)
}
stopping.Wait()
ingressInformers.Wait()
return nil
case lbs := <-isw.lbStatus:
// stop existing informer
// Stop the existing informer.
if shutdown != nil {
close(shutdown)
}
stopping.Wait()
ingressInformers.Wait()

// create informer for the new LoadBalancerStatus
isw.log.Info("Received a new address for status.loadBalancer")

// Create a new informer for the new LoadBalancerStatus.
factory := isw.clients.NewInformerFactory()
inf := factory.Networking().V1beta1().Ingresses().Informer()
log := isw.log.WithField("context", "IngressStatusLoadBalancerUpdater")
inf.AddEventHandler(&k8s.StatusLoadBalancerUpdater{
log := isw.log.WithField("context", "IngressStatusUpdater")
inf.AddEventHandler(&k8s.IngressStatusUpdater{
Client: isw.clients.ClientSet(),
Logger: log,
Status: lbs,
})

shutdown = make(chan struct{})
stopping.Add(1)
ingressInformers.Add(1)
fn := startInformer(factory, log)
go func() {
defer stopping.Done()
fn(shutdown)
defer ingressInformers.Done()
if err := fn(shutdown); err != nil {
return
}
}()
}
}
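
The loop above hands each new informer to fn := startInformer(factory, log), whose definition is not part of this diff. As a rough, hypothetical sketch (the name, package layout, and behaviour below are assumptions for illustration, not the commit's actual helper), such a function typically returns a closure that starts the factory's informers and blocks until the shutdown channel is closed:

package sketch

import (
	"github.com/sirupsen/logrus"
	"k8s.io/client-go/informers"
)

// startInformerSketch is a hypothetical stand-in for the startInformer helper
// referenced in the loop above; the real helper is outside this diff.
func startInformerSketch(factory informers.SharedInformerFactory, log logrus.FieldLogger) func(stop <-chan struct{}) error {
	return func(stop <-chan struct{}) error {
		log.Info("started informer")
		defer log.Info("stopped informer")
		// Start every informer registered on the factory, then block until
		// the caller closes the stop (shutdown) channel.
		factory.Start(stop)
		<-stop
		return nil
	}
}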
10 changes: 5 additions & 5 deletions cmd/contour/serve.go
@@ -268,19 +268,19 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error {
// step 10. register leadership election
eventHandler.IsLeader = setupLeadershipElection(&g, log, ctx, clients, eventHandler.UpdateNow)

// step 11. set up ingress status writer
isw := ingressStatusWriter{
log: log.WithField("context", "ingressStatusWriter"),
// step 11. set up ingress load balancer status writer
lbsw := loadBalancerStatusWriter{
log: log.WithField("context", "loadBalancerStatusWriter"),
clients: clients,
isLeader: eventHandler.IsLeader,
lbStatus: make(chan v1.LoadBalancerStatus, 1),
}
g.Add(isw.Start)
g.Add(lbsw.Start)

// step 12. register an informer to watch envoy's service.
ssw := &k8s.ServiceStatusLoadBalancerWatcher{
ServiceName: ctx.envoyServiceName,
LBStatus: isw.lbStatus,
LBStatus: lbsw.lbStatus,
}
factory := clients.NewInformerFactoryForNamespace(ctx.envoyServiceNamespace)
factory.Core().V1().Services().Informer().AddEventHandler(ssw)
1 change: 1 addition & 0 deletions examples/contour/02-rbac.yaml
@@ -60,6 +60,7 @@ rules:
- watch
- patch
- post
- update
- apiGroups: ["contour.heptio.com"]
resources: ["ingressroutes", "tlscertificatedelegations"]
verbs:
6 changes: 5 additions & 1 deletion examples/contour/03-envoy.yaml
@@ -1,12 +1,16 @@
---
apiVersion: apps/v1
kind: Deployment
kind: DaemonSet
metadata:
labels:
app: envoy
name: envoy
namespace: projectcontour
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 10%
selector:
matchLabels:
app: envoy
7 changes: 5 additions & 2 deletions examples/render/contour.yaml
@@ -1429,7 +1429,7 @@ rules:
- list
- watch
- apiGroups:
- extensions
- "networking.k8s.io"
resources:
- ingresses
verbs:
@@ -1439,11 +1439,14 @@ rules:
- apiGroups:
- "networking.k8s.io"
resources:
- ingresses
- "ingresses/status"
verbs:
- get
- list
- watch
- patch
- post
- update
- apiGroups: ["contour.heptio.com"]
resources: ["ingressroutes", "tlscertificatedelegations"]
verbs:
2 changes: 1 addition & 1 deletion internal/contour/handler.go
@@ -223,7 +223,7 @@ func (e *EventHandler) updateDAG() {
e.Metrics.SetIngressRouteMetric(metrics)
e.Metrics.SetHTTPProxyMetric(proxymetrics)
default:
e.Debug("skipping status update: not the leader")
e.Debug("skipping metrics and CRD status update, not leader")
}
}

8 changes: 4 additions & 4 deletions internal/k8s/ingressstatus.go
@@ -23,13 +23,13 @@ import (
// IngressStatusUpdater observes informer OnAdd events and
// updates the ingress.status.loadBalancer field on all Ingress
// objects that match the ingress class (if used).
type StatusLoadBalancerUpdater struct {
type IngressStatusUpdater struct {
Client clientset.Interface
Logger logrus.FieldLogger
Status v1.LoadBalancerStatus
}

func (s *StatusLoadBalancerUpdater) OnAdd(obj interface{}) {
func (s *IngressStatusUpdater) OnAdd(obj interface{}) {
ing := obj.(*v1beta1.Ingress).DeepCopy()

// TODO(dfc) check ingress class
@@ -44,7 +44,7 @@ func (s *StatusLoadBalancerUpdater) OnAdd(obj interface{}) {
}
}

func (s *StatusLoadBalancerUpdater) OnUpdate(oldObj, newObj interface{}) {
func (s *IngressStatusUpdater) OnUpdate(oldObj, newObj interface{}) {
// Ignoring OnUpdate allows us to avoid the message generated
// from the status update.

@@ -55,7 +55,7 @@ func (s *StatusLoadBalancerUpdater) OnUpdate(oldObj, newObj interface{}) {
// of scope.
}

func (s *StatusLoadBalancerUpdater) OnDelete(obj interface{}) {
func (s *IngressStatusUpdater) OnDelete(obj interface{}) {
// we don't need to update the status on resources that
// have been deleted.
}
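
The body of OnAdd is collapsed in this diff. As an illustration only (an assumption about its general shape, not the commit's actual implementation, which may apply a patch rather than UpdateStatus), a handler holding a v1.LoadBalancerStatus could write it back to the Ingress status subresource along these lines:

package sketch

import (
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/api/networking/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
)

// ingressStatusSketch is a hypothetical handler used only to illustrate the
// status write; it mirrors the fields of IngressStatusUpdater shown above.
type ingressStatusSketch struct {
	Client clientset.Interface
	Logger logrus.FieldLogger
	Status v1.LoadBalancerStatus
}

func (s *ingressStatusSketch) OnAdd(obj interface{}) {
	ing, ok := obj.(*v1beta1.Ingress)
	if !ok {
		return
	}
	ing = ing.DeepCopy()
	// Copy the address(es) received from the Envoy service into the Ingress
	// status, then write the status subresource back. This write is why the
	// RBAC change above adds the "update" verb for "ingresses/status".
	ing.Status.LoadBalancer = s.Status
	// Note: newer client-go releases take a context.Context and
	// metav1.UpdateOptions here.
	if _, err := s.Client.NetworkingV1beta1().Ingresses(ing.Namespace).UpdateStatus(ing); err != nil {
		s.Logger.WithError(err).Error("unable to update ingress status")
	}
}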
10 changes: 5 additions & 5 deletions internal/k8s/ingressstatus_test.go
@@ -54,14 +54,14 @@ func TestServiceStatusLoadBalancerWatcherOnAdd(t *testing.T) {

// assert adding a service with the correct name generates a notification
svc.Name = sw.ServiceName
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{Hostname: "vmware.com"}}
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{Hostname: "projectcontour.io"}}
sw.OnAdd(&svc)
got, ok := recv()
if !ok {
t.Fatalf("expected result when adding a service with the correct name")
}
want := v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{{Hostname: "vmware.com"}},
Ingress: []v1.LoadBalancerIngress{{Hostname: "projectcontour.io"}},
}
assert.Equal(t, got, want)
}
@@ -102,14 +102,14 @@ func TestServiceStatusLoadBalancerWatcherOnUpdate(t *testing.T) {
// assert updating a service with the correct name generates a notification
var svc v1.Service
svc.Name = sw.ServiceName
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{Hostname: "vmware.com"}}
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{Hostname: "projectcontour.io"}}
sw.OnUpdate(&oldSvc, &svc)
got, ok := recv()
if !ok {
t.Fatalf("expected result when updating a service with the correct name")
}
want := v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{{Hostname: "vmware.com"}},
Ingress: []v1.LoadBalancerIngress{{Hostname: "projectcontour.io"}},
}
assert.Equal(t, got, want)
}
@@ -148,7 +148,7 @@ func TestServiceStatusLoadBalancerWatcherOnDelete(t *testing.T) {

// assert deleting a service with the correct name generates a blank notification
svc.Name = sw.ServiceName
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{Hostname: "vmware.com"}}
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{Hostname: "projectcontour.io"}}
sw.OnDelete(&svc)
got, ok := recv()
if !ok {
