From 0b5293b5bcb2c6187f77cc2e970f65edce9a953d Mon Sep 17 00:00:00 2001 From: Jakub Jarosz Date: Mon, 12 Dec 2022 22:34:29 +0000 Subject: [PATCH 01/10] Add healthcheck for transport server --- go.mod | 2 +- go.sum | 4 +- internal/configs/configurator.go | 62 +++++-- internal/configs/configurator_test.go | 118 +++++++++++++ internal/configs/transportserver.go | 10 +- internal/configs/virtualserver.go | 7 - internal/healthcheck/healthcheck.go | 87 +++++++-- internal/healthcheck/healthcheck_test.go | 214 +++++++++++++++++++++++ 8 files changed, 464 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index 16cfdcda87..79861f928f 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.13.25 github.com/cert-manager/cert-manager v1.10.1 - github.com/go-chi/chi v1.5.4 github.com/golang-jwt/jwt/v4 v4.4.3 + github.com/go-chi/chi/v5 v5.0.8 github.com/golang/glog v1.0.0 github.com/google/go-cmp v0.5.9 github.com/kr/pretty v0.3.1 diff --git a/go.sum b/go.sum index d0f6a515b6..559016b0d8 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs= -github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg= +github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= +github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= diff --git a/internal/configs/configurator.go b/internal/configs/configurator.go index ff54420919..bf398cc044 100644 --- a/internal/configs/configurator.go +++ b/internal/configs/configurator.go @@ -111,6 +111,7 @@ type Configurator struct { ingresses map[string]*IngressEx minions map[string]map[string]bool virtualServers map[string]*VirtualServerEx + transportServers map[string]*TransportServerEx tlsPassthroughPairs map[string]tlsPassthroughPair isWildcardEnabled bool isPlus bool @@ -145,6 +146,7 @@ func NewConfigurator(nginxManager nginx.Manager, staticCfgParams *StaticConfigPa cfgParams: config, ingresses: make(map[string]*IngressEx), virtualServers: make(map[string]*VirtualServerEx), + transportServers: make(map[string]*TransportServerEx), templateExecutor: templateExecutor, templateExecutorV2: templateExecutorV2, minions: make(map[string]map[string]bool), @@ -265,8 +267,8 @@ func (cnf *Configurator) AddOrUpdateIngress(ingEx *IngressEx) (Warnings, error) return warnings, nil } -// GetVirtualServerForHost takes a hostname and returns a VS for the given hostname. -func (cnf *Configurator) GetVirtualServerForHost(hostname string) *conf_v1.VirtualServer { +// virtualServerForHost takes a hostname and returns a VS for the given hostname. 
+func (cnf *Configurator) virtualServerForHost(hostname string) *conf_v1.VirtualServer { for _, vsEx := range cnf.virtualServers { if vsEx.VirtualServer.Spec.Host == hostname { return vsEx.VirtualServer @@ -275,8 +277,8 @@ func (cnf *Configurator) GetVirtualServerForHost(hostname string) *conf_v1.Virtu return nil } -// GetUpstreamsforVirtualServer takes VS and returns a slice of upstreams. -func (cnf *Configurator) GetUpstreamsforVirtualServer(vs *conf_v1.VirtualServer) []string { +// upstreamsForVirtualServer takes VirtualServer and returns a list of associated upstreams. +func (cnf *Configurator) upstreamsForVirtualServer(vs *conf_v1.VirtualServer) []string { glog.V(3).Infof("Get upstreamName for vs: %s", vs.Spec.Host) upstreamNames := make([]string, 0, len(vs.Spec.Upstreams)) @@ -290,18 +292,52 @@ func (cnf *Configurator) GetUpstreamsforVirtualServer(vs *conf_v1.VirtualServer) return upstreamNames } -// GetUpstreamsforHost takes a hostname and returns a slice of upstreams -// for the given hostname. -func (cnf *Configurator) GetUpstreamsforHost(hostname string) []string { +// UpstreamsForHost takes a hostname and returns upstreams for the given hostname. +func (cnf *Configurator) UpstreamsForHost(hostname string) []string { glog.V(3).Infof("Get upstream for host: %s", hostname) - vs := cnf.GetVirtualServerForHost(hostname) - + vs := cnf.virtualServerForHost(hostname) if vs != nil { - return cnf.GetUpstreamsforVirtualServer(vs) + return cnf.upstreamsForVirtualServer(vs) + } + return nil +} + +// StreamUpstreamsForName takes a name and returns stream upstreams +// associated with this name. The name represents TS's +// (TransportServer) action name. +func (cnf *Configurator) StreamUpstreamsForName(name string) []string { + glog.V(3).Infof("Get stream upstreams for name: '%s'", name) + ts := cnf.transportServerForActionName(name) + if ts != nil { + return cnf.streamUpstreamsForTransportServer(ts) + } + return nil +} + +// transportServerForActionName takes an action name and returns +// Transport Server obj associated with that name. +func (cnf *Configurator) transportServerForActionName(name string) *conf_v1alpha1.TransportServer { + for _, tsEx := range cnf.transportServers { + glog.V(3).Infof("Check ts action '%s' for requested name: '%s'", tsEx.TransportServer.Spec.Action.Pass, name) + if tsEx.TransportServer.Spec.Action.Pass == name { + return tsEx.TransportServer + } } return nil } +// streamUpstreamsForTransportServer takes TransportServer obj and returns +// a list of stream upstreams associated with this TransportServer. 
+func (cnf *Configurator) streamUpstreamsForTransportServer(ts *conf_v1alpha1.TransportServer) []string { + upstreamNames := make([]string, 0, len(ts.Spec.Upstreams)) + n := newUpstreamNamerForTransportServer(ts) + for _, u := range ts.Spec.Upstreams { + un := n.GetNameForUpstream(u.Name) + upstreamNames = append(upstreamNames, un) + } + return upstreamNames +} + func (cnf *Configurator) addOrUpdateIngress(ingEx *IngressEx) (Warnings, error) { apResources := cnf.updateApResources(ingEx) @@ -489,8 +525,7 @@ func (cnf *Configurator) AddOrUpdateVirtualServer(virtualServerEx *VirtualServer } func (cnf *Configurator) addOrUpdateOpenTracingTracerConfig(content string) error { - err := cnf.nginxManager.CreateOpenTracingTracerConfig(content) - return err + return cnf.nginxManager.CreateOpenTracingTracerConfig(content) } func (cnf *Configurator) addOrUpdateVirtualServer(virtualServerEx *VirtualServerEx) (Warnings, error) { @@ -632,9 +667,10 @@ func (cnf *Configurator) addOrUpdateTransportServer(transportServerEx *Transport if cnf.isPlus && cnf.isPrometheusEnabled { cnf.updateTransportServerMetricsLabels(transportServerEx, tsCfg.Upstreams) } - cnf.nginxManager.CreateStreamConfig(name, content) + cnf.transportServers[name] = transportServerEx + // update TLS Passthrough Hosts config in case we have a TLS Passthrough TransportServer // only TLS Passthrough TransportServers have non-empty hosts if transportServerEx.TransportServer.Spec.Host != "" { diff --git a/internal/configs/configurator_test.go b/internal/configs/configurator_test.go index 42a8648392..b6ad8ec1bc 100644 --- a/internal/configs/configurator_test.go +++ b/internal/configs/configurator_test.go @@ -1281,3 +1281,121 @@ func TestUpdateApResourcesForVs(t *testing.T) { } } } + +func TestUpstreamsForHost_ReturnsNilForNoVirtualServers(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.virtualServers = map[string]*VirtualServerEx{ + "vs": invalidVirtualServerEx, + } + + got := tcnf.UpstreamsForHost("tea.example.com") + if got != nil { + t.Errorf("want nil, got %+v", got) + } +} + +func TestUpstreamsForHost_DoesNotReturnUpstreamsOnBogusHostname(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.virtualServers = map[string]*VirtualServerEx{ + "vs": validVirtualServerExWithUpstreams, + } + + got := tcnf.UpstreamsForHost("bogus.host.org") + if got != nil { + t.Errorf("want nil, got %+v", got) + } +} + +func TestUpstreamsForHost_ReturnsUpstreamsNamesForValidHostname(t *testing.T) { + t.Parallel() + tcnf := createTestConfigurator(t) + tcnf.virtualServers = map[string]*VirtualServerEx{ + "vs": validVirtualServerExWithUpstreams, + } + + want := []string{"vs_default_test-vs_tea-app"} + got := tcnf.UpstreamsForHost("tea.example.com") + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +func TestStreamUpstreamsForName_DoesNotReturnUpstreamsForBogusName(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.transportServers = map[string]*TransportServerEx{ + "ts": validTransportServerExWithUpstreams, + } + + got := tcnf.StreamUpstreamsForName("bogus-service-name") + if got != nil { + t.Errorf("want nil, got %+v", got) + } +} + +func TestStreamUpstreamsForName_ReturnsStreamUpstreamsNamesOnValidServiceName(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.transportServers = map[string]*TransportServerEx{ + "ts": validTransportServerExWithUpstreams, + } + + want := []string{"ts_default_secure-app_secure-app"} + got := 
tcnf.StreamUpstreamsForName("secure-app") + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +var ( + invalidVirtualServerEx = &VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{}, + } + validVirtualServerExWithUpstreams = &VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "test-vs", + Namespace: "default", + }, + Spec: conf_v1.VirtualServerSpec{ + Host: "tea.example.com", + Upstreams: []conf_v1.Upstream{ + { + Name: "tea-app", + }, + }, + }, + }, + } + validTransportServerExWithUpstreams = &TransportServerEx{ + TransportServer: &conf_v1alpha1.TransportServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "secure-app", + Namespace: "default", + }, + Spec: conf_v1alpha1.TransportServerSpec{ + Listener: conf_v1alpha1.TransportServerListener{ + Name: "tls-passthrough", + Protocol: "TLS_PASSTHROUGH", + }, + Host: "example.com", + Upstreams: []conf_v1alpha1.Upstream{ + { + Name: "secure-app", + Service: "secure-app", + Port: 8443, + }, + }, + Action: &conf_v1alpha1.Action{ + Pass: "secure-app", + }, + }, + }, + } +) diff --git a/internal/configs/transportserver.go b/internal/configs/transportserver.go index b1db784a38..ba7d0b9e3a 100644 --- a/internal/configs/transportserver.go +++ b/internal/configs/transportserver.go @@ -24,14 +24,18 @@ func (tsEx *TransportServerEx) String() string { if tsEx == nil { return "" } - if tsEx.TransportServer == nil { return "TransportServerEx has no TransportServer" } - return fmt.Sprintf("%s/%s", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name) } +func newUpstreamNamerForTransportServer(transportServer *conf_v1alpha1.TransportServer) *upstreamNamer { + return &upstreamNamer{ + prefix: fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name), + } +} + // generateTransportServerConfig generates a full configuration for a TransportServer. 
func generateTransportServerConfig(transportServerEx *TransportServerEx, listenerPort int, isPlus bool, isResolverConfigured bool) (*version2.TransportServerConfig, Warnings) { upstreamNamer := newUpstreamNamerForTransportServer(transportServerEx.TransportServer) @@ -105,7 +109,6 @@ func generateUnixSocket(transportServerEx *TransportServerEx) string { if transportServerEx.TransportServer.Spec.Listener.Name == conf_v1alpha1.TLSPassthroughListenerName { return fmt.Sprintf("unix:/var/lib/nginx/passthrough-%s_%s.sock", transportServerEx.TransportServer.Namespace, transportServerEx.TransportServer.Name) } - return "" } @@ -172,7 +175,6 @@ func generateTransportServerHealthCheck(upstreamName string, generatedUpstreamNa break } } - return hc, match } diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go index da05e3b787..46c37caa99 100644 --- a/internal/configs/virtualserver.go +++ b/internal/configs/virtualserver.go @@ -8,7 +8,6 @@ import ( "github.com/golang/glog" "github.com/nginxinc/kubernetes-ingress/internal/k8s/secrets" "github.com/nginxinc/kubernetes-ingress/internal/nginx" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" api_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" @@ -158,12 +157,6 @@ func NewUpstreamNamerForVirtualServerRoute(virtualServer *conf_v1.VirtualServer, } } -func newUpstreamNamerForTransportServer(transportServer *conf_v1alpha1.TransportServer) *upstreamNamer { - return &upstreamNamer{ - prefix: fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name), - } -} - func (namer *upstreamNamer) GetNameForUpstreamFromAction(action *conf_v1.Action) string { var upstream string if action.Proxy != nil && action.Proxy.Upstream != "" { diff --git a/internal/healthcheck/healthcheck.go b/internal/healthcheck/healthcheck.go index b25fe58bf7..b299b01aed 100644 --- a/internal/healthcheck/healthcheck.go +++ b/internal/healthcheck/healthcheck.go @@ -14,7 +14,7 @@ import ( v1 "k8s.io/api/core/v1" - "github.com/go-chi/chi" + "github.com/go-chi/chi/v5" "github.com/golang/glog" "github.com/nginxinc/kubernetes-ingress/internal/configs" "github.com/nginxinc/nginx-plus-go-client/client" @@ -35,10 +35,12 @@ func RunHealthCheck(port int, plusClient *client.NginxClient, cnf *configs.Confi // HealthServer holds data required for running // the healthcheck server. type HealthServer struct { - Server *http.Server - URL string - UpstreamsForHost func(host string) []string - NginxUpstreams func() (*client.Upstreams, error) + Server *http.Server + URL string + UpstreamsForHost func(host string) []string + NginxUpstreams func() (*client.Upstreams, error) + StreamUpstreamsForName func(host string) []string + NginxStreamUpstreams func() (*client.StreamUpstreams, error) } // NewHealthServer creates Health Server. 
If secret is provided, @@ -50,9 +52,11 @@ func NewHealthServer(addr string, nc *client.NginxClient, cnf *configs.Configura ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, }, - URL: fmt.Sprintf("http://%s/", addr), - UpstreamsForHost: cnf.GetUpstreamsforHost, - NginxUpstreams: nc.GetUpstreams, + URL: fmt.Sprintf("http://%s/", addr), + UpstreamsForHost: cnf.UpstreamsForHost, + NginxUpstreams: nc.GetUpstreams, + StreamUpstreamsForName: cnf.StreamUpstreamsForName, + NginxStreamUpstreams: nc.GetStreamUpstreams, } if secret != nil { @@ -72,7 +76,8 @@ func NewHealthServer(addr string, nc *client.NginxClient, cnf *configs.Configura // ListenAndServe starts healthcheck server. func (hs *HealthServer) ListenAndServe() error { mux := chi.NewRouter() - mux.Get("/probe/{hostname}", hs.Retrieve) + mux.Get("/probe/{hostname}", hs.UpstreamStats) + mux.Get("/probe/ts/{name}", hs.StreamStats) hs.Server.Handler = mux if hs.Server.TLSConfig != nil { return hs.Server.ListenAndServeTLS("", "") @@ -85,8 +90,8 @@ func (hs *HealthServer) Shutdown(ctx context.Context) error { return hs.Server.Shutdown(ctx) } -// Retrieve finds health stats for the host identified by a hostname in the request URL. -func (hs *HealthServer) Retrieve(w http.ResponseWriter, r *http.Request) { +// UpstreamStats calculates health stats for the host identified by the hostname in the request URL. +func (hs *HealthServer) UpstreamStats(w http.ResponseWriter, r *http.Request) { hostname := chi.URLParam(r, "hostname") host := sanitize(hostname) @@ -124,6 +129,43 @@ func (hs *HealthServer) Retrieve(w http.ResponseWriter, r *http.Request) { } } +// StreamStats calculates health stats for the TransportServer(s) +// identified by the service (action) name in the request URL. +func (hs *HealthServer) StreamStats(w http.ResponseWriter, r *http.Request) { + name := chi.URLParam(r, "name") + n := sanitize(name) + streamUpstreamNames := hs.StreamUpstreamsForName(n) + if len(streamUpstreamNames) == 0 { + glog.Errorf("no stream upstreams for requested name '%s' or name does not exist", n) + w.WriteHeader(http.StatusNotFound) + return + } + streams, err := hs.NginxStreamUpstreams() + if err != nil { + glog.Errorf("error retrieving stream upstreams for requested name: %s", n) + w.WriteHeader(http.StatusInternalServerError) + return + } + stats := countStreamStats(streams, streamUpstreamNames) + data, err := json.Marshal(stats) + if err != nil { + glog.Error("error marshaling result", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + switch stats.Up { + case 0: + w.WriteHeader(http.StatusServiceUnavailable) + default: + w.WriteHeader(http.StatusOK) + } + if _, err := w.Write(data); err != nil { + glog.Error("error writing result", err) + http.Error(w, "internal error", http.StatusInternalServerError) + } +} + func sanitize(s string) string { hostname := strings.TrimSpace(s) hostname = strings.ReplaceAll(hostname, "\n", "") @@ -169,11 +211,30 @@ func countStats(upstreams *client.Upstreams, upstreamNames []string) HostStats { up++ } } + return HostStats{ + Total: total, + Up: up, + Unhealthy: total - up, + } +} - unhealthy := total - up +func countStreamStats(streams *client.StreamUpstreams, streamUpstreamNames []string) HostStats { + total, up := 0, 0 + for name, s := range *streams { + if !slices.Contains(streamUpstreamNames, name) { + continue + } + for _, p := range s.Peers { + total++ + if strings.ToLower(p.State) != "up" { + continue + } + up++ + } + } 
return HostStats{ Total: total, Up: up, - Unhealthy: unhealthy, + Unhealthy: total - up, } } diff --git a/internal/healthcheck/healthcheck_test.go b/internal/healthcheck/healthcheck_test.go index 24ab5aa81b..71e1333d5f 100644 --- a/internal/healthcheck/healthcheck_test.go +++ b/internal/healthcheck/healthcheck_test.go @@ -196,6 +196,135 @@ func TestHealthCheckServer_RespondsWith500OnErrorFromNGINXAPI(t *testing.T) { } } +func TestHealthCheckServer_Returns404OnMissingTransportServerActionName(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllUp + + resp, err := http.Get(hs.URL + "probe/ts/") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusNotFound { + t.Error(resp.StatusCode) + } +} + +func TestHealthCheckServer_Returns404OnBogusTransportServerActionName(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllUp + + resp, err := http.Get(hs.URL + "probe/ts/bogusname") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusNotFound { + t.Error(resp.StatusCode) + } +} + +func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnAllPeersUp(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllUp + + resp, err := http.Get(hs.URL + "probe/ts/foo-app") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK { + t.Fatal(resp.StatusCode) + } + + want := healthcheck.HostStats{ + Total: 6, + Up: 6, + Unhealthy: 0, + } + var got healthcheck.HostStats + if err := json.NewDecoder(resp.Body).Decode(&got); err != nil { + t.Fatal(err) + } + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnSomePeersUpSomeDown(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXPartiallyUp + + resp, err := http.Get(hs.URL + "probe/ts/foo-app") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK { + t.Fatal(resp.StatusCode) + } + + want := healthcheck.HostStats{ + Total: 6, + Up: 4, + Unhealthy: 2, + } + var got healthcheck.HostStats + if err := json.NewDecoder(resp.Body).Decode(&got); err != nil { + t.Fatal(err) + } + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnAllPeersDown(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllPeersDown + + resp, err := http.Get(hs.URL + "probe/ts/foo-app") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusServiceUnavailable { + t.Fatal(resp.StatusCode) + } + + want := healthcheck.HostStats{ + Total: 6, + Up: 0, + Unhealthy: 6, + } + var got healthcheck.HostStats + if err := json.NewDecoder(resp.Body).Decode(&got); err 
!= nil { + t.Fatal(err) + } + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + // getUpstreamsForHost is a helper func faking response from IC. func getUpstreamsForHost(host string) []string { upstreams := map[string][]string{ @@ -325,3 +454,88 @@ func getUpstreamsFromNGINXNotExistingHost() (*client.Upstreams, error) { func getUpstreamsFromNGINXErrorFromAPI() (*client.Upstreams, error) { return nil, errors.New("nginx api error") } + +// streamUpstreamsForName is a helper func faking response from IC. +func streamUpstreamsForName(name string) []string { + upstreams := map[string][]string{ + "foo-app": {"streamUpstream1", "streamUpstream2"}, + "bar-app": {"streamUpstream1"}, + } + u, ok := upstreams[name] + if !ok { + return []string{} + } + return u +} + +// streamUpstreamsFromNGINXAllUp is a helper func +// for faking response from NGINX Plus client. +// +//nolint:unparam +func streamUpstreamsFromNGINXAllUp() (*client.StreamUpstreams, error) { + streamUpstreams := client.StreamUpstreams{ + "streamUpstream1": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Up"}, + {State: "Up"}, + {State: "Up"}, + }, + }, + "streamUpstream2": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Up"}, + {State: "Up"}, + {State: "Up"}, + }, + }, + } + return &streamUpstreams, nil +} + +// streamUpstreamsFromNGINXPartiallyUp is a helper func +// for faking response from NGINX Plus client. +// +//nolint:unparam +func streamUpstreamsFromNGINXPartiallyUp() (*client.StreamUpstreams, error) { + streamUpstreams := client.StreamUpstreams{ + "streamUpstream1": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Up"}, + {State: "Down"}, + {State: "Up"}, + }, + }, + "streamUpstream2": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Down"}, + {State: "Up"}, + {State: "Up"}, + }, + }, + } + return &streamUpstreams, nil +} + +// streamUpstreamsFromNGINXAllPeersDown is a helper func +// for faking response from NGINX Plus client. +// +//nolint:unparam +func streamUpstreamsFromNGINXAllPeersDown() (*client.StreamUpstreams, error) { + streamUpstreams := client.StreamUpstreams{ + "streamUpstream1": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Down"}, + {State: "Down"}, + {State: "Down"}, + }, + }, + "streamUpstream2": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Down"}, + {State: "Down"}, + {State: "Down"}, + }, + }, + } + return &streamUpstreams, nil +} From 920fcdeff40666864ba823b966798cedddeaba49 Mon Sep 17 00:00:00 2001 From: Jakub Jarosz Date: Mon, 12 Dec 2022 22:50:51 +0000 Subject: [PATCH 02/10] Update go.mod --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 79861f928f..135589ccbb 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.13.25 github.com/cert-manager/cert-manager v1.10.1 - github.com/golang-jwt/jwt/v4 v4.4.3 github.com/go-chi/chi/v5 v5.0.8 + github.com/golang-jwt/jwt/v4 v4.4.3 github.com/golang/glog v1.0.0 github.com/google/go-cmp v0.5.9 github.com/kr/pretty v0.3.1 From 302a89e5470e8d62c1bd4a748b43fd6801e13af3 Mon Sep 17 00:00:00 2001 From: Jakub Jarosz Date: Tue, 13 Dec 2022 10:51:35 +0000 Subject: [PATCH 03/10] Update docs for Service Insight This update includes information about healthcheck monitoring of Transport Servers. 
It provides info about additional probe path used for Transport Servers --- .../logging-and-monitoring/service-insight.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/content/logging-and-monitoring/service-insight.md b/docs/content/logging-and-monitoring/service-insight.md index f55ee5a5f8..b1d977b8e3 100644 --- a/docs/content/logging-and-monitoring/service-insight.md +++ b/docs/content/logging-and-monitoring/service-insight.md @@ -11,7 +11,7 @@ docs: "DOCS-000" --- -The Ingress Controller exposes an endpoint and provides host statistics for Virtual Servers (VS). +The Ingress Controller exposes an endpoint and provides host statistics for Virtual Servers (VS) and Transport Servers (TS). It exposes data in JSON format and returns HTTP status codes. The response body holds information about the total, down and the unhealthy number of upstreams associated with the hostname. @@ -25,7 +25,7 @@ The service is healthy if at least one upstream is healthy. In this case, the en ## Enabling Service Insight Endpoint If you're using *Kubernetes manifests* (Deployment or DaemonSet) to install the Ingress Controller, to enable the Service Insight endpoint: -1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Ingress Controller endpoint via the path `/probe/{hostname}` on port `9114` (customizable with the `-service-insight-listen-port` command-line argument). +1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Ingress Controller endpoint via paths `/probe/{hostname}` for Virtual Servers, and `/probe/ts/{name}` for Transport Servers on port `9114` (customizable with the `-service-insight-listen-port` command-line argument). 1. To enable TLS for the Service Insight endpoint, configure the `-service-insight-tls-secret` cli argument with the namespace and name of a TLS Secret. 1. Add the Service Insight port to the list of the ports of the Ingress Controller container in the template of the Ingress Controller pod: ```yaml @@ -39,9 +39,9 @@ If you're using *Helm* to install the Ingress Controller, to enable Service Insi The Service Insight provides the following statistics: -* Total number of VS -* Number of VS in 'Down' state -* Number of VS in 'Healthy' state +* Total number of VS and TS +* Number of VS and TS in 'Down' state +* Number of VS and TS in 'Healthy' state These statistics are returned as JSON: @@ -52,7 +52,7 @@ These statistics are returned as JSON: Response codes: * HTTP 200 OK - Service is healthy -* HTTP 404 - No upstreams/VS found for the requested hostname -* HTTP 503 Service Unavailable - The service is down (All upstreams/VS are "Unhealthy") +* HTTP 404 - No upstreams/VS/TS found for the requested hostname / name +* HTTP 503 Service Unavailable - The service is down (All upstreams/VS/TS are "Unhealthy") **Note**: wildcards in hostnames are not supported at the moment. 
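A quick way to exercise the endpoints documented in the patch above is a tiny Go client; a minimal sketch follows. The controller address and the probed hostname/action name are illustrative, and the local struct simply mirrors the documented JSON fields of the HostStats payload:

```go
// probe_sketch.go - minimal sketch of a client for the Service Insight
// endpoints described above. Address and names are illustrative.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// hostStats mirrors the JSON document returned by the probe endpoints:
// {"Total": ..., "Up": ..., "Unhealthy": ...}
type hostStats struct {
	Total     int
	Up        int
	Unhealthy int
}

func probe(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var stats hostStats
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		return err
	}
	// 200: at least one upstream peer is up; 404: unknown hostname/name;
	// 503: every peer is down (a later patch in this series changes 503 to 418).
	fmt.Printf("%s -> %d %+v\n", url, resp.StatusCode, stats)
	return nil
}

func main() {
	// VirtualServers are probed by hostname, TransportServers by action name.
	_ = probe("http://192.0.2.10:9114/probe/tea.example.com")
	_ = probe("http://192.0.2.10:9114/probe/ts/secure-app")
}
```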
From 211e9f5e5662f168514442427d42a72f4a6a25cd Mon Sep 17 00:00:00 2001
From: Jakub Jarosz
Date: Tue, 13 Dec 2022 18:10:01 +0000
Subject: [PATCH 04/10] Add python test for TS service insight http

---
 .../test_transport_server_service_insight.py | 57 +++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 tests/suite/test_transport_server_service_insight.py

diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py
new file mode 100644
index 0000000000..776b62cdd0
--- /dev/null
+++ b/tests/suite/test_transport_server_service_insight.py
@@ -0,0 +1,57 @@
+from unittest import mock
+
+import pytest
+import requests
+from settings import TEST_DATA
+from suite.utils.resources_utils import (
+    create_secret_from_yaml,
+    delete_secret,
+    ensure_response_from_backend,
+    patch_deployment_from_yaml,
+    wait_before_test,
+    get_ts_nginx_template_conf
+)
+from suite.utils.yaml_utils import get_name_from_yaml
+
+
+@pytest.mark.ts
+@pytest.mark.skip_from_nginx_oss
+@pytest.mark.parametrize(
+    "crd_ingress_controller, transport_server_setup",
+    [
+        (
+            {
+                "type": "complete",
+                "extra_args": [
+                    f"-global-configuration=nginx-ingress/nginx-configuration",
+                    f"-enable-leader-election=false",
+                    f"-enable-snippets",
+                    f"-enable-custom-resources",
+                    f"-enable-service-insight",
+                ],
+            },
+            {"example": "transport-server-status"},
+        )
+    ],
+    indirect=True,
+)
+class TestHealthCheckTransportServer:
+    def test_responses_svc_insight_http(
+        self, request, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites, ingress_controller_endpoint
+    ):
+        """Test responses from service insight enpoint with http"""
+        retry = 0
+        resp = mock.Mock()
+        resp.json.return_value = {}
+        resp.status_code = 502
+        ts_source = f"{TEST_DATA}/transport-server-tcp-load-balance/standard/service_deployment.yaml"
+        name = get_name_from_yaml(ts_source)
+        req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/ts/{name}"
+        ensure_response_from_backend(req_url, transport_server_setup.ts_host)
+        while (resp.json() != {"Total": 3, "Up": 3, "Unhealthy": 0}) and retry < 5:
+            resp = requests.get(req_url)
+            wait_before_test()
+            retry += 1
+
+        assert resp.status_code == 200, f"Expected 200 code for /probe/ts/{name} but got {resp.status_code}"
+        assert resp.json() == {"Total": 1, "Up": 1, "Unhealthy": 0}

From 18afe288b78cd3873bd2b176da4aa54ff2906555 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 13 Dec 2022 18:13:36 +0000
Subject: [PATCH 05/10] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 tests/suite/test_transport_server_service_insight.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py
index 776b62cdd0..9b35e47f24 100644
--- a/tests/suite/test_transport_server_service_insight.py
+++ b/tests/suite/test_transport_server_service_insight.py
@@ -7,9 +7,9 @@
     create_secret_from_yaml,
     delete_secret,
     ensure_response_from_backend,
+    get_ts_nginx_template_conf,
     patch_deployment_from_yaml,
     wait_before_test,
-    get_ts_nginx_template_conf
 )
 from suite.utils.yaml_utils import get_name_from_yaml
 
@@ -37,7 +37,13 @@
 )
 class TestHealthCheckTransportServer:
     def test_responses_svc_insight_http(
-        self, request, kube_apis, 
crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites, ingress_controller_endpoint + self, + request, + kube_apis, + crd_ingress_controller, + transport_server_setup, + ingress_controller_prerequisites, + ingress_controller_endpoint, ): """Test responses from service insight enpoint with http""" retry = 0 From 3d8a2f7f9db561aeb2215ec3274d3bbeda5fbbbe Mon Sep 17 00:00:00 2001 From: Jakub Jarosz Date: Tue, 3 Jan 2023 13:17:55 +0000 Subject: [PATCH 06/10] Update docs --- docs/content/logging-and-monitoring/service-insight.md | 10 +++++----- tests/suite/test_transport_server_service_insight.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/content/logging-and-monitoring/service-insight.md b/docs/content/logging-and-monitoring/service-insight.md index b1d977b8e3..98264c25d9 100644 --- a/docs/content/logging-and-monitoring/service-insight.md +++ b/docs/content/logging-and-monitoring/service-insight.md @@ -20,14 +20,14 @@ Returned HTTP codes indicate the health of the upstreams (service). The service is not healthy (HTTP response code different than 200 OK) if all upstreams are unhealthy. The service is healthy if at least one upstream is healthy. In this case, the endpoint returns HTTP code 200 OK. - - ## Enabling Service Insight Endpoint If you're using *Kubernetes manifests* (Deployment or DaemonSet) to install the Ingress Controller, to enable the Service Insight endpoint: + 1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Ingress Controller endpoint via paths `/probe/{hostname}` for Virtual Servers, and `/probe/ts/{name}` for Transport Servers on port `9114` (customizable with the `-service-insight-listen-port` command-line argument). 1. To enable TLS for the Service Insight endpoint, configure the `-service-insight-tls-secret` cli argument with the namespace and name of a TLS Secret. 1. Add the Service Insight port to the list of the ports of the Ingress Controller container in the template of the Ingress Controller pod: + ```yaml - name: service-insight containerPort: 9114 @@ -40,8 +40,8 @@ If you're using *Helm* to install the Ingress Controller, to enable Service Insi The Service Insight provides the following statistics: * Total number of VS and TS -* Number of VS and TS in 'Down' state -* Number of VS and TS in 'Healthy' state +* Number of VS and TS in 'Up' state +* Number of VS and TS in 'Unhealthy' state These statistics are returned as JSON: @@ -52,7 +52,7 @@ These statistics are returned as JSON: Response codes: * HTTP 200 OK - Service is healthy -* HTTP 404 - No upstreams/VS/TS found for the requested hostname / name +* HTTP 404 - No upstreams/VS/TS found for the requested hostname/name * HTTP 503 Service Unavailable - The service is down (All upstreams/VS/TS are "Unhealthy") **Note**: wildcards in hostnames are not supported at the moment. 
diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py index 9b35e47f24..d495519970 100644 --- a/tests/suite/test_transport_server_service_insight.py +++ b/tests/suite/test_transport_server_service_insight.py @@ -45,7 +45,7 @@ def test_responses_svc_insight_http( ingress_controller_prerequisites, ingress_controller_endpoint, ): - """Test responses from service insight enpoint with http""" + """Test responses from service insight endpoint with http""" retry = 0 resp = mock.Mock() resp.json.return_value = {} From d1b7feb45c07342acf0b86db3b01c6a27e1d733d Mon Sep 17 00:00:00 2001 From: Jakub Jarosz Date: Tue, 3 Jan 2023 15:58:24 +0000 Subject: [PATCH 07/10] Add python tests for service insight --- .../logging-and-monitoring/service-insight.md | 2 +- .../test_transport_server_service_insight.py | 190 +++++++++++++++--- .../test_virtual_server_service_insight.py | 4 +- 3 files changed, 167 insertions(+), 29 deletions(-) diff --git a/docs/content/logging-and-monitoring/service-insight.md b/docs/content/logging-and-monitoring/service-insight.md index 98264c25d9..b52e608e38 100644 --- a/docs/content/logging-and-monitoring/service-insight.md +++ b/docs/content/logging-and-monitoring/service-insight.md @@ -24,7 +24,7 @@ The service is healthy if at least one upstream is healthy. In this case, the en If you're using *Kubernetes manifests* (Deployment or DaemonSet) to install the Ingress Controller, to enable the Service Insight endpoint: -1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Ingress Controller endpoint via paths `/probe/{hostname}` for Virtual Servers, and `/probe/ts/{name}` for Transport Servers on port `9114` (customizable with the `-service-insight-listen-port` command-line argument). +1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Ingress Controller endpoint via paths `/probe/{hostname}` for Virtual Servers, and `/probe/ts/{service_name}` for Transport Servers on port `9114` (customizable with the `-service-insight-listen-port` command-line argument). The `service_name` parameter refers to the name of the deployed service (the service specified under `upstreams` in the transport server). 1. To enable TLS for the Service Insight endpoint, configure the `-service-insight-tls-secret` cli argument with the namespace and name of a TLS Secret. 1. 
Add the Service Insight port to the list of the ports of the Ingress Controller container in the template of the Ingress Controller pod: diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py index d495519970..42a994ec6d 100644 --- a/tests/suite/test_transport_server_service_insight.py +++ b/tests/suite/test_transport_server_service_insight.py @@ -1,63 +1,201 @@ +from pprint import pprint from unittest import mock import pytest import requests -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA +from suite.fixtures.fixtures import PublicEndpoint +from suite.utils.custom_resources_utils import create_ts_from_yaml, delete_ts, read_ts from suite.utils.resources_utils import ( + create_items_from_yaml, create_secret_from_yaml, + delete_items_from_yaml, delete_secret, - ensure_response_from_backend, - get_ts_nginx_template_conf, - patch_deployment_from_yaml, + get_first_pod_name, + get_nginx_template_conf, + replace_configmap_from_yaml, wait_before_test, + wait_until_all_pods_are_ready, ) -from suite.utils.yaml_utils import get_name_from_yaml +from suite.utils.ssl_utils import create_sni_session +from suite.utils.vs_vsr_resources_utils import create_virtual_server_from_yaml, delete_virtual_server, read_vs +from suite.utils.yaml_utils import get_first_host_from_yaml + + +class TransportServerTlsSetup: + """ + Encapsulate Transport Server details. + + Attributes: + public_endpoint (object): + ts_resource (dict): + name (str): + namespace (str): + ts_host (str): + """ + + def __init__(self, public_endpoint: PublicEndpoint, ts_resource, name, namespace, ts_host): + self.public_endpoint = public_endpoint + self.ts_resource = ts_resource + self.name = name + self.namespace = namespace + self.ts_host = ts_host + + +@pytest.fixture(scope="class") +def transport_server_tls_passthrough_setup( + request, kube_apis, test_namespace, ingress_controller_endpoint +) -> TransportServerTlsSetup: + """ + Prepare Transport Server Example. 
+ + :param request: internal pytest fixture to parametrize this method + :param kube_apis: client apis + :param test_namespace: namespace for test resources + :param ingress_controller_endpoint: ip and port information + :return TransportServerTlsSetup: + """ + print("------------------------- Deploy Transport Server with tls passthrough -----------------------------------") + # deploy secure_app + secure_app_file = f"{TEST_DATA}/{request.param['example']}/standard/secure-app.yaml" + create_items_from_yaml(kube_apis, secure_app_file, test_namespace) + + # deploy transport server + transport_server_std_src = f"{TEST_DATA}/{request.param['example']}/standard/transport-server.yaml" + ts_resource = create_ts_from_yaml(kube_apis.custom_objects, transport_server_std_src, test_namespace) + ts_host = get_first_host_from_yaml(transport_server_std_src) + wait_until_all_pods_are_ready(kube_apis.v1, test_namespace) + + def fin(): + if request.config.getoption("--skip-fixture-teardown") == "no": + print("Clean up TransportServer and app:") + delete_ts(kube_apis.custom_objects, ts_resource, test_namespace) + delete_items_from_yaml(kube_apis, secure_app_file, test_namespace) + + request.addfinalizer(fin) + + return TransportServerTlsSetup( + ingress_controller_endpoint, + ts_resource, + ts_resource["metadata"]["name"], + test_namespace, + ts_host, + ) @pytest.mark.ts -@pytest.mark.skip_from_nginx_oss @pytest.mark.parametrize( - "crd_ingress_controller, transport_server_setup", + "crd_ingress_controller, transport_server_tls_passthrough_setup", [ ( { "type": "complete", "extra_args": [ - f"-global-configuration=nginx-ingress/nginx-configuration", - f"-enable-leader-election=false", - f"-enable-snippets", - f"-enable-custom-resources", - f"-enable-service-insight", + "-enable-tls-passthrough=true", + "-enable-service-insight", ], }, - {"example": "transport-server-status"}, + {"example": "transport-server-tls-passthrough"}, ) ], indirect=True, ) -class TestHealthCheckTransportServer: - def test_responses_svc_insight_http( +class TestTransportServerTSServiceInsightHTTP: + def test_ts_service_insight( self, - request, kube_apis, crd_ingress_controller, - transport_server_setup, - ingress_controller_prerequisites, + transport_server_tls_passthrough_setup, + test_namespace, ingress_controller_endpoint, ): - """Test responses from service insight endpoint with http""" + """ + Test Service Insight Endpoint with Transport Server on HTTP port. 
+        """
+        session = create_sni_session()
+        req_url = (
+            f"https://{transport_server_tls_passthrough_setup.public_endpoint.public_ip}:"
+            f"{transport_server_tls_passthrough_setup.public_endpoint.port_ssl}"
+        )
+        wait_before_test()
+        resp = session.get(
+            req_url,
+            headers={"host": transport_server_tls_passthrough_setup.ts_host},
+            verify=False,
+        )
+        assert resp.status_code == 200
+        assert f"hello from pod {get_first_pod_name(kube_apis.v1, test_namespace)}" in resp.text
+
+        # Service Insight test
         retry = 0
         resp = mock.Mock()
         resp.json.return_value = {}
         resp.status_code = 502
-        ts_source = f"{TEST_DATA}/transport-server-tcp-load-balance/standard/service_deployment.yaml"
-        name = get_name_from_yaml(ts_source)
-        req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/ts/{name}"
-        ensure_response_from_backend(req_url, transport_server_setup.ts_host)
-        while (resp.json() != {"Total": 3, "Up": 3, "Unhealthy": 0}) and retry < 5:
-            resp = requests.get(req_url)
+
+        service_insight_endpoint = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/ts/secure-app"
+        resp = requests.get(service_insight_endpoint)
+        assert resp.status_code == 200, f"Expected 200 code for /probe/ts/secure-app but got {resp.status_code}"
+
+        while (resp.json() != {"Total": 1, "Up": 1, "Unhealthy": 0}) and retry < 5:
+            resp = requests.get(service_insight_endpoint)
             wait_before_test()
             retry += 1
+        assert resp.json() == {"Total": 1, "Up": 1, "Unhealthy": 0}
+

-        assert resp.status_code == 200, f"Expected 200 code for /probe/ts/{name} but got {resp.status_code}"
-        assert resp.json() == {"Total": 1, "Up": 1, "Unhealthy": 0}
+@pytest.fixture(scope="class")
+def https_secret_setup(request, kube_apis, test_namespace):
+    print("------------------------- Deploy Secret -----------------------------------")
+    secret_name = create_secret_from_yaml(kube_apis.v1, "nginx-ingress", f"{TEST_DATA}/service-insight/secret.yaml")
+
+    def fin():
+        delete_secret(kube_apis.v1, secret_name, "nginx-ingress")
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.ts
+@pytest.mark.parametrize(
+    "crd_ingress_controller, transport_server_tls_passthrough_setup",
+    [
+        (
+            {
+                "type": "complete",
+                "extra_args": [
+                    "-enable-tls-passthrough=true",
+                    "-enable-service-insight",
+                    "-service-insight-tls-secret=nginx-ingress/test-secret",
+                ],
+            },
+            {"example": "transport-server-tls-passthrough"},
+        )
+    ],
+    indirect=True,
+)
+class TestTransportServerTSServiceInsightHTTPS:
+    def test_ts_service_insight_https(
+        self,
+        kube_apis,
+        crd_ingress_controller,
+        https_secret_setup,
+        transport_server_tls_passthrough_setup,
+        test_namespace,
+        ingress_controller_endpoint,
+    ):
+        """
+        Test Service Insight Endpoint with Transport Server on HTTPS port. 
+        """
+        retry = 0
+        resp = mock.Mock()
+        resp.json.return_value = {}
+        resp.status_code = 502
+
+        service_insight_endpoint = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/ts/secure-app"
+        resp = requests.get(service_insight_endpoint, verify=False)
+        assert resp.status_code == 200, f"Expected 200 code for /probe/ts/secure-app but got {resp.status_code}"
+
+        while (resp.json() != {"Total": 1, "Up": 1, "Unhealthy": 0}) and retry < 5:
+            resp = requests.get(service_insight_endpoint, verify=False)
+            wait_before_test()
+            retry += 1
+        assert resp.json() == {"Total": 1, "Up": 1, "Unhealthy": 0}
diff --git a/tests/suite/test_virtual_server_service_insight.py b/tests/suite/test_virtual_server_service_insight.py
index bbda23c107..81cea90823 100644
--- a/tests/suite/test_virtual_server_service_insight.py
+++ b/tests/suite/test_virtual_server_service_insight.py
@@ -25,7 +25,7 @@
     ],
     indirect=True,
 )
-class TestHealthCheckVsHttp:
+class TestVirtualServerServiceInsightHTTP:
     def test_responses_svc_insight_http(
         self, request, kube_apis, crd_ingress_controller, virtual_server_setup, ingress_controller_endpoint
     ):
@@ -77,7 +77,7 @@ def fin():
     ],
     indirect=True,
 )
-class TestHealthCheckVsHttps:
+class TestVirtualServerServiceInsightHTTPS:
     def test_responses_svc_insight_https(
         self,
         request,

From e86bf987cc8d365fcd15038e30b83464f05f3682 Mon Sep 17 00:00:00 2001
From: Venktesh
Date: Tue, 3 Jan 2023 16:29:46 +0000
Subject: [PATCH 08/10] restrict test to N+ only

---
 tests/suite/test_transport_server_service_insight.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py
index 42a994ec6d..b60f72df0d 100644
--- a/tests/suite/test_transport_server_service_insight.py
+++ b/tests/suite/test_transport_server_service_insight.py
@@ -84,6 +84,7 @@ def fin():
 
 
 @pytest.mark.ts
+@pytest.mark.skip_for_nginx_oss
 @pytest.mark.parametrize(
     "crd_ingress_controller, transport_server_tls_passthrough_setup",
     [
@@ -155,6 +156,7 @@ def fin():
 
 
 @pytest.mark.ts
+@pytest.mark.skip_for_nginx_oss
 @pytest.mark.parametrize(
     "crd_ingress_controller, transport_server_tls_passthrough_setup",
     [

From 5e0e432f98e23a3cee322d3e605ec4c39d320487 Mon Sep 17 00:00:00 2001
From: Venktesh
Date: Tue, 3 Jan 2023 16:31:58 +0000
Subject: [PATCH 09/10] fix fixture call order

---
 tests/suite/test_transport_server_service_insight.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py
index b60f72df0d..9bda5959f3 100644
--- a/tests/suite/test_transport_server_service_insight.py
+++ b/tests/suite/test_transport_server_service_insight.py
@@ -178,8 +178,8 @@ class TestTransportServerTSServiceInsightHTTPS:
     def test_ts_service_insight_https(
         self,
         kube_apis,
-        crd_ingress_controller,
         https_secret_setup,
+        crd_ingress_controller,
         transport_server_tls_passthrough_setup,
         test_namespace,
         ingress_controller_endpoint,

From c529853ac420066d922c728bbca0fc1955bc8f9e Mon Sep 17 00:00:00 2001
From: Jakub Jarosz
Date: Tue, 3 Jan 2023 17:32:20 +0000
Subject: [PATCH 10/10] Update status code from 503 to 418

---
 docs/content/logging-and-monitoring/service-insight.md | 4 ++--
 internal/healthcheck/healthcheck.go                    | 4 ++--
 internal/healthcheck/healthcheck_test.go               | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/content/logging-and-monitoring/service-insight.md 
b/docs/content/logging-and-monitoring/service-insight.md index b52e608e38..9073e633b1 100644 --- a/docs/content/logging-and-monitoring/service-insight.md +++ b/docs/content/logging-and-monitoring/service-insight.md @@ -52,7 +52,7 @@ These statistics are returned as JSON: Response codes: * HTTP 200 OK - Service is healthy -* HTTP 404 - No upstreams/VS/TS found for the requested hostname/name -* HTTP 503 Service Unavailable - The service is down (All upstreams/VS/TS are "Unhealthy") +* HTTP 404 Not Found - No upstreams/VS/TS found for the requested hostname/name +* HTTP 418 I'm a teapot - The service is down (All upstreams/VS/TS are "Unhealthy") **Note**: wildcards in hostnames are not supported at the moment. diff --git a/internal/healthcheck/healthcheck.go b/internal/healthcheck/healthcheck.go index b299b01aed..5c74d8d7a5 100644 --- a/internal/healthcheck/healthcheck.go +++ b/internal/healthcheck/healthcheck.go @@ -119,7 +119,7 @@ func (hs *HealthServer) UpstreamStats(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") switch stats.Up { case 0: - w.WriteHeader(http.StatusServiceUnavailable) + w.WriteHeader(http.StatusTeapot) default: w.WriteHeader(http.StatusOK) } @@ -156,7 +156,7 @@ func (hs *HealthServer) StreamStats(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") switch stats.Up { case 0: - w.WriteHeader(http.StatusServiceUnavailable) + w.WriteHeader(http.StatusTeapot) default: w.WriteHeader(http.StatusOK) } diff --git a/internal/healthcheck/healthcheck_test.go b/internal/healthcheck/healthcheck_test.go index 71e1333d5f..2090f35f59 100644 --- a/internal/healthcheck/healthcheck_test.go +++ b/internal/healthcheck/healthcheck_test.go @@ -109,7 +109,7 @@ func TestHealthCheckServer_ReturnsCorrectStatsForHostnameOnAllPeersDown(t *testi } defer resp.Body.Close() //nolint:errcheck - if resp.StatusCode != http.StatusServiceUnavailable { + if resp.StatusCode != http.StatusTeapot { t.Fatal(resp.StatusCode) } @@ -307,7 +307,7 @@ func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnAllPeersDo } defer resp.Body.Close() //nolint:errcheck - if resp.StatusCode != http.StatusServiceUnavailable { + if resp.StatusCode != http.StatusTeapot { t.Fatal(resp.StatusCode) }
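Taken together, the series leaves the stream probe computing stats and selecting a status code as in this standalone sketch. The peer and upstream types below are local stand-ins for the nginx-plus-go-client structures, and the data is illustrative; the counting mirrors countStreamStats from PATCH 01 and the status mapping follows PATCH 10:

```go
// A standalone sketch of the final probe semantics: peers are counted
// with the same case-insensitive "up" test as countStreamStats, and the
// response code follows PATCH 10 (200 if any peer is up, 418 if none).
package main

import (
	"fmt"
	"net/http"
	"strings"
)

type streamPeer struct{ State string }
type streamUpstream struct{ Peers []streamPeer }

type hostStats struct{ Total, Up, Unhealthy int }

func countStreamStats(streams map[string]streamUpstream, names []string) hostStats {
	wanted := make(map[string]bool, len(names))
	for _, n := range names {
		wanted[n] = true
	}
	total, up := 0, 0
	for name, s := range streams {
		if !wanted[name] {
			continue
		}
		for _, p := range s.Peers {
			total++
			if strings.ToLower(p.State) == "up" {
				up++
			}
		}
	}
	return hostStats{Total: total, Up: up, Unhealthy: total - up}
}

func main() {
	// Upstream names follow the ts_<namespace>_<name>_<upstream> scheme used
	// by newUpstreamNamerForTransportServer in PATCH 01.
	streams := map[string]streamUpstream{
		"ts_default_secure-app_secure-app": {Peers: []streamPeer{{State: "Up"}, {State: "Down"}}},
		"ts_default_other_other":           {Peers: []streamPeer{{State: "Up"}}},
	}
	stats := countStreamStats(streams, []string{"ts_default_secure-app_secure-app"})

	code := http.StatusOK
	if stats.Up == 0 {
		code = http.StatusTeapot // 418 after PATCH 10; 503 before it
	}
	fmt.Printf("%+v -> HTTP %d\n", stats, code) // {Total:2 Up:1 Unhealthy:1} -> HTTP 200
}
```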