Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
Add probe route to VirtualService spec.
Gofmt virtual_service.go
Fix virtual_service_test.go.
Basic StatusManager implementation.
Working state.
Cleanup
Remove useless logging statements.
Address vagababov comments
Moved imports to knative.dev
Add TestWithHostOption
Update TestIngressTypicalFlow
Fix existing tests
Implement status_test.go
Fix space/tab issue
Lint fixes
Log probing errors
  • Loading branch information
JRBANCEL committed Jul 17, 2019
1 parent 936b4b0 commit 754a463
Show file tree
Hide file tree
Showing 12 changed files with 843 additions and 31 deletions.
4 changes: 4 additions & 0 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions pkg/apis/networking/v1alpha1/ingress_lifecycle.go
Expand Up @@ -71,6 +71,13 @@ func (is *IngressStatus) MarkLoadBalancerReady(lbs []LoadBalancerIngressStatus,
ingressCondSet.Manage(is).MarkTrue(IngressConditionLoadBalancerReady)
}

// MarkLoadBalancerPending signals that the load balancer is not ready yet by
// setting the "IngressConditionLoadBalancerReady" condition to unknown.
func (is *IngressStatus) MarkLoadBalancerPending() {
	manager := ingressCondSet.Manage(is)
	manager.MarkUnknown(IngressConditionLoadBalancerReady, "Uninitialized", "Waiting for VirtualService to be ready")
}

// IsReady looks at the conditions and returns true if the Status has the
// condition IngressConditionReady with ConditionStatus True.
func (is *IngressStatus) IsReady() bool {
Expand Down
5 changes: 5 additions & 0 deletions pkg/apis/networking/v1alpha1/ingress_lifecycle_test.go
Expand Up @@ -80,6 +80,11 @@ func TestIngressTypicalFlow(t *testing.T) {
apitest.CheckConditionSucceeded(r.duck(), IngressConditionNetworkConfigured, t)
apitest.CheckConditionOngoing(r.duck(), IngressConditionReady, t)

// Then ingress is pending.
r.MarkLoadBalancerPending()
apitest.CheckConditionOngoing(r.duck(), IngressConditionLoadBalancerReady, t)
apitest.CheckConditionOngoing(r.duck(), IngressConditionReady, t)

// Then ingress has address.
r.MarkLoadBalancerReady(
[]LoadBalancerIngressStatus{{DomainInternal: "gateway.default.svc"}},
Expand Down
8 changes: 8 additions & 0 deletions pkg/network/prober/prober.go
Expand Up @@ -45,6 +45,14 @@ func WithHeader(name, value string) Preparer {
}
}

// WithHost returns a Preparer that rewrites the probe request to target the
// given host.
func WithHost(host string) Preparer {
	return func(req *http.Request) *http.Request {
		req.Host = host
		return req
	}
}

// ExpectsBody validates that the body of the probe response matches the provided string.
func ExpectsBody(body string) Verifier {
return func(r *http.Response, b []byte) (bool, error) {
Expand Down
40 changes: 40 additions & 0 deletions pkg/network/prober/prober_test.go
Expand Up @@ -244,6 +244,46 @@ func TestAsyncMultiple(t *testing.T) {
}
}

// TestWithHostOption verifies that the WithHost Preparer makes the probe
// request carry the configured Host and that Do reports the server's verdict.
func TestWithHostOption(t *testing.T) {
	const host = "foobar.com"
	// The test server answers 200 only when the request carries the expected
	// host; any other host gets a 404, which Do reports as a failed probe.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t.Logf("Want: %s, Got: %s", host, r.Host)
		if r.Host != host {
			w.WriteHeader(http.StatusNotFound)
		}
	}))
	defer ts.Close()

	tests := []struct {
		name    string
		options []interface{}
		success bool
	}{{
		name:    "no hosts",
		success: false,
	}, {
		name:    "expected host",
		options: []interface{}{WithHost(host)},
		success: true,
	}, {
		name:    "wrong host",
		options: []interface{}{WithHost("nope.com")},
		success: false,
	}}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ok, err := Do(context.Background(), network.AutoTransport, ts.URL, test.options...)
			if err != nil {
				t.Errorf("failed to probe: %v", err)
			}
			if ok != test.success {
				t.Errorf("unexpected probe result: want: %v, got: %v", test.success, ok)
			}
		})
	}
}

func (m *Manager) len() int {
m.mu.Lock()
defer m.mu.Unlock()
Expand Down
13 changes: 13 additions & 0 deletions pkg/reconciler/clusteringress/clusteringress.go
Expand Up @@ -22,9 +22,12 @@ import (
"knative.dev/serving/pkg/network"
"knative.dev/serving/pkg/reconciler"

"knative.dev/pkg/apis/istio/v1alpha3"
gatewayinformer "knative.dev/pkg/client/injection/informers/istio/v1alpha3/gateway"
virtualserviceinformer "knative.dev/pkg/client/injection/informers/istio/v1alpha3/virtualservice"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
podinformer "knative.dev/pkg/injection/informers/kubeinformers/corev1/pod"
"knative.dev/pkg/tracker"
"knative.dev/serving/pkg/apis/networking"
"knative.dev/serving/pkg/apis/serving/v1alpha1"
Expand Down Expand Up @@ -80,6 +83,8 @@ func (c *Reconciler) Init(ctx context.Context, cmw configmap.Watcher, impl *cont

c.Logger.Info("Setting up Ingress event handlers")
clusterIngressInformer := clusteringressinformer.Get(ctx)
gatewayInformer := gatewayinformer.Get(ctx)
podInformer := podinformer.Get(ctx)

myFilterFunc := reconciler.AnnotationFilterFunc(networking.IngressClassAnnotationKey, network.IstioIngressClassName, true)
clusterIngressHandler := cache.FilteringResourceEventHandler{
Expand All @@ -106,6 +111,14 @@ func (c *Reconciler) Init(ctx context.Context, cmw configmap.Watcher, impl *cont
configStore.WatchConfigs(cmw)
c.BaseIngressReconciler.ConfigStore = configStore

c.Logger.Info("Setting up StatusManager")
resyncIngressOnVirtualServiceReady := func(vs *v1alpha3.VirtualService) {
impl.EnqueueLabelOfClusterScopedResource(networking.ClusterIngressLabelKey)(vs)
}
statusProber := ing.NewStatusProber(c.Logger.Named("status-manager"), gatewayInformer.Lister(),
podInformer.Lister(), network.NewAutoTransport, resyncIngressOnVirtualServiceReady)
c.BaseIngressReconciler.StatusManager = statusProber
statusProber.Start(ctx.Done())
}

// Reconcile compares the actual state with the desired, and attempts to
Expand Down
71 changes: 52 additions & 19 deletions pkg/reconciler/clusteringress/clusteringress_test.go
Expand Up @@ -19,6 +19,7 @@ package clusteringress
import (
"context"
"fmt"
"log"
"testing"
"time"

Expand All @@ -27,6 +28,7 @@ import (
_ "knative.dev/pkg/client/injection/informers/istio/v1alpha3/gateway/fake"
_ "knative.dev/pkg/client/injection/informers/istio/v1alpha3/virtualservice/fake"
fakekubeclient "knative.dev/pkg/injection/clients/kubeclient/fake"
_ "knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake"
_ "knative.dev/pkg/injection/informers/kubeinformers/corev1/secret/fake"
fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake"
_ "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/clusteringress/fake"
Expand Down Expand Up @@ -184,9 +186,9 @@ func TestReconcile(t *testing.T) {
ingress("no-virtualservice-yet", 1234),
},
WantCreates: []runtime.Object{
resources.MakeMeshVirtualService(ingress("no-virtualservice-yet", 1234)),
resources.MakeIngressVirtualService(ingress("no-virtualservice-yet", 1234),
makeGatewayMap([]string{"knative-test-gateway", "knative-ingress-gateway"}, nil)),
insertProbe(resources.MakeMeshVirtualService(ingress("no-virtualservice-yet", 1234))),
insertProbe(resources.MakeIngressVirtualService(ingress("no-virtualservice-yet", 1234),
makeGatewayMap([]string{"knative-test-gateway", "knative-ingress-gateway"}, nil))),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: ingressWithStatus("no-virtualservice-yet", 1234,
Expand Down Expand Up @@ -263,11 +265,11 @@ func TestReconcile(t *testing.T) {
},
},
WantUpdates: []clientgotesting.UpdateActionImpl{{
Object: resources.MakeIngressVirtualService(ingress("reconcile-virtualservice", 1234),
makeGatewayMap([]string{"knative-test-gateway", "knative-ingress-gateway"}, nil)),
Object: insertProbe(resources.MakeIngressVirtualService(ingress("reconcile-virtualservice", 1234),
makeGatewayMap([]string{"knative-test-gateway", "knative-ingress-gateway"}, nil))),
}},
WantCreates: []runtime.Object{
resources.MakeMeshVirtualService(ingress("reconcile-virtualservice", 1234)),
insertProbe(resources.MakeMeshVirtualService(ingress("reconcile-virtualservice", 1234))),
},
WantDeletes: []clientgotesting.DeleteActionImpl{{
ActionImpl: clientgotesting.ActionImpl{
Expand Down Expand Up @@ -332,6 +334,11 @@ func TestReconcile(t *testing.T) {
ConfigStore: &testConfigStore{
config: ReconcilerTestConfig(),
},
StatusManager: &fakeStatusManager{
FakeIsReady: func(service *v1alpha3.VirtualService) (b bool, e error) {
return true, nil
},
},
},
clusterIngressLister: listers.GetClusterIngressLister(),
}
Expand All @@ -352,9 +359,9 @@ func TestReconcile_EnableAutoTLS(t *testing.T) {
// The creation of gateways are triggered when setting up the test.
gateway("knative-ingress-gateway", system.Namespace(), []v1alpha3.Server{irrelevantServer}),

resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234)),
resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil)),
insertProbe(resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234))),
insertProbe(resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil))),
},
WantUpdates: []clientgotesting.UpdateActionImpl{{
// ingressTLSServer needs to be added into Gateway.
Expand Down Expand Up @@ -415,9 +422,9 @@ func TestReconcile_EnableAutoTLS(t *testing.T) {
originSecret("istio-system", "secret0"),
},
WantCreates: []runtime.Object{
resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234)),
resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil)),
insertProbe(resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234))),
insertProbe(resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil))),
},
WantPatches: []clientgotesting.PatchActionImpl{
patchAddFinalizerAction("reconciling-clusteringress", clusterIngressFinalizer),
Expand Down Expand Up @@ -504,9 +511,9 @@ func TestReconcile_EnableAutoTLS(t *testing.T) {
// The creation of gateways are triggered when setting up the test.
gateway("knative-ingress-gateway", system.Namespace(), []v1alpha3.Server{irrelevantServer}),

resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234)),
resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil)),
insertProbe(resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234))),
insertProbe(resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil))),

// The secret copy under istio-system.
secret("istio-system", targetSecretName, map[string]string{
Expand Down Expand Up @@ -594,9 +601,9 @@ func TestReconcile_EnableAutoTLS(t *testing.T) {
WantCreates: []runtime.Object{
// The creation of gateways are triggered when setting up the test.
gateway("knative-ingress-gateway", system.Namespace(), []v1alpha3.Server{*withCredentialName(ingressTLSServer.DeepCopy(), targetSecretName), irrelevantServer}),
resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234)),
resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil)),
insertProbe(resources.MakeMeshVirtualService(ingress("reconciling-clusteringress", 1234))),
insertProbe(resources.MakeIngressVirtualService(ingress("reconciling-clusteringress", 1234),
makeGatewayMap([]string{"knative-ingress-gateway"}, nil))),
},
WantUpdates: []clientgotesting.UpdateActionImpl{{
Object: &corev1.Secret{
Expand Down Expand Up @@ -673,7 +680,7 @@ func TestReconcile_EnableAutoTLS(t *testing.T) {
WantCreates: []runtime.Object{
// The creation of gateways are triggered when setting up the test.
gateway("knative-ingress-gateway", system.Namespace(), []v1alpha3.Server{irrelevantServer}),
resources.MakeMeshVirtualService(ingressWithTLSClusterLocal("reconciling-clusteringress", 1234, []v1alpha1.IngressTLS{})),
insertProbe(resources.MakeMeshVirtualService(ingressWithTLSClusterLocal("reconciling-clusteringress", 1234, []v1alpha1.IngressTLS{}))),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: ingressWithTLSAndStatusClusterLocal("reconciling-clusteringress", 1234,
Expand Down Expand Up @@ -750,6 +757,11 @@ func TestReconcile_EnableAutoTLS(t *testing.T) {
},
},
},
StatusManager: &fakeStatusManager{
FakeIsReady: func(service *v1alpha3.VirtualService) (b bool, e error) {
return true, nil
},
},
},
clusterIngressLister: listers.GetClusterIngressLister(),
}
Expand Down Expand Up @@ -924,6 +936,12 @@ func newTestSetup(t *testing.T, configs ...*corev1.ConfigMap) (
configMapWatcher := &configmap.ManualWatcher{Namespace: system.Namespace()}
controller := NewController(ctx, configMapWatcher)

controller.Reconciler.(*Reconciler).StatusManager = &fakeStatusManager{
FakeIsReady: func(vs *v1alpha3.VirtualService) (b bool, e error) {
return true, nil
},
}

cms := append([]*corev1.ConfigMap{{
ObjectMeta: metav1.ObjectMeta{
Name: config.IstioConfigName,
Expand Down Expand Up @@ -1141,3 +1159,18 @@ func makeGatewayMap(publicGateways []string, privateGateways []string) map[v1alp
v1alpha1.IngressVisibilityClusterLocal: privateGateways,
}
}

// insertProbe decorates the given VirtualService with the probe header and
// aborts the test binary if the probe cannot be inserted.
func insertProbe(vs *v1alpha3.VirtualService) *v1alpha3.VirtualService {
	_, err := resources.InsertProbe(vs)
	if err != nil {
		log.Fatalf("failed to insert probe: %v", err)
	}
	return vs
}

// fakeStatusManager stubs out the StatusManager dependency in tests; the
// probing outcome is fully controlled through the FakeIsReady callback.
type fakeStatusManager struct {
	// FakeIsReady is invoked for every IsReady call and supplies its result.
	FakeIsReady func(*v1alpha3.VirtualService) (bool, error)
}

// IsReady delegates to the configured FakeIsReady callback.
func (m *fakeStatusManager) IsReady(vs *v1alpha3.VirtualService) (bool, error) {
	return m.FakeIsReady(vs)
}
37 changes: 28 additions & 9 deletions pkg/reconciler/ingress/ingress.go
Expand Up @@ -89,6 +89,8 @@ type BaseIngressReconciler struct {

Tracker tracker.Interface
Finalizer string

StatusManager StatusManager
}

// NewBaseIngressReconciler creates a new BaseIngressReconciler
Expand Down Expand Up @@ -249,7 +251,10 @@ func (r *BaseIngressReconciler) reconcileIngress(ctx context.Context, ra Reconci
logger.Infof("Reconciling ingress: %#v", ia)

gatewayNames := gatewayNamesFromContext(ctx)
vses := resources.MakeVirtualServices(ia, gatewayNames)
vses, err := resources.MakeVirtualServices(ia, gatewayNames)
if err != nil {
return err
}

// First, create the VirtualServices.
logger.Infof("Creating/Updating VirtualServices")
Expand All @@ -258,17 +263,31 @@ func (r *BaseIngressReconciler) reconcileIngress(ctx context.Context, ra Reconci
// when error reconciling VirtualService?
return err
}
// As underlying network programming (VirtualService now) is stateless,
// here we simply mark the ingress as ready if the VirtualService
// is successfully synced.

// Update status
ia.GetStatus().MarkNetworkConfigured()
ia.GetStatus().ObservedGeneration = ia.GetGeneration()

lbReady := true
for _, vs := range vses {
ready, err := r.StatusManager.IsReady(vs)
if err != nil {
return fmt.Errorf("failed to probe VirtualService %s/%s: %v", vs.Namespace, vs.Name, err)
}

lbs := getLBStatus(gatewayServiceURLFromContext(ctx, ia))
publicLbs := getLBStatus(publicGatewayServiceURLFromContext(ctx))
privateLbs := getLBStatus(privateGatewayServiceURLFromContext(ctx))
// We don't break as soon as one VirtualService is not ready because IsReady
// needs to be called on every VirtualService to trigger polling.
lbReady = lbReady && ready
}
if lbReady {
lbs := getLBStatus(gatewayServiceURLFromContext(ctx, ia))
publicLbs := getLBStatus(publicGatewayServiceURLFromContext(ctx))
privateLbs := getLBStatus(privateGatewayServiceURLFromContext(ctx))

ia.GetStatus().MarkLoadBalancerReady(lbs, publicLbs, privateLbs)
ia.GetStatus().ObservedGeneration = ia.GetGeneration()
ia.GetStatus().MarkLoadBalancerReady(lbs, publicLbs, privateLbs)
} else {
ia.GetStatus().MarkLoadBalancerPending()
}

if enableReconcileGateway(ctx) {
if !ia.IsPublic() {
Expand Down

0 comments on commit 754a463

Please sign in to comment.