diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2e42787756..254daac015 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -86,6 +86,10 @@ Adding a new version? You'll need three changes:
   from Konnect. The initial period will be used until a valid license is retrieved.
   The default values are 1m and 12h respectively.
   [#4178](https://github.com/Kong/kubernetes-ingress-controller/pull/4178)
+- Whenever the Kong configuration is successfully built and pushed, it is stored
+  in memory and used as a fallback in case a later configuration turns out to be
+  broken.
+  [#4205](https://github.com/Kong/kubernetes-ingress-controller/pull/4205)
 
 ### Changed
 
diff --git a/internal/dataplane/kong_client.go b/internal/dataplane/kong_client.go
index f367854702..569d23fc57 100644
--- a/internal/dataplane/kong_client.go
+++ b/internal/dataplane/kong_client.go
@@ -117,6 +117,11 @@ type KongClient struct {
     // SHAs is a slice is configuration hashes send in last batch send.
     SHAs []string
 
+    // lastValidKongState contains the last valid configuration pushed
+    // to the gateways. It is used as a fallback in case a newer config version is
+    // somehow broken.
+    lastValidKongState *kongstate.KongState
+
     // clientsProvider allows retrieving the most recent set of clients.
     clientsProvider clients.AdminAPIClientsProvider
 
@@ -388,6 +393,13 @@ func (c *KongClient) Update(ctx context.Context) error {
 
     // In case of a failure in syncing configuration with Gateways, propagate the error.
     if gatewaysSyncErr != nil {
+        if c.lastValidKongState != nil {
+            _, fallbackSyncErr := c.sendOutToGatewayClients(ctx, c.lastValidKongState, c.kongConfig)
+            if fallbackSyncErr != nil {
+                return errors.Join(gatewaysSyncErr, fallbackSyncErr)
+            }
+            c.logger.Debug("due to errors in the current config, the last valid config has been pushed to Gateways")
+        }
         return gatewaysSyncErr
     }
 
@@ -423,6 +435,8 @@ func (c *KongClient) sendOutToGatewayClients(
     sort.Strings(shas)
     c.SHAs = shas
 
+    c.lastValidKongState = s
+
     return previousSHAs, nil
 }
 
diff --git a/test/e2e/all_in_one_test.go b/test/e2e/all_in_one_test.go
index 6ed380e050..73dd521f1e 100644
--- a/test/e2e/all_in_one_test.go
+++ b/test/e2e/all_in_one_test.go
@@ -19,6 +19,7 @@ import (
     "github.com/stretchr/testify/require"
     autoscalingv1 "k8s.io/api/autoscaling/v1"
     corev1 "k8s.io/api/core/v1"
+    netv1 "k8s.io/api/networking/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     k8stypes "k8s.io/apimachinery/pkg/types"
 
@@ -305,7 +306,7 @@ func TestDeployAllInOneDBLESS(t *testing.T) {
     deployments := getManifestDeployments(manifestFilePath)
 
     t.Log("running ingress tests to verify all-in-one deployed ingress controller and proxy are functional")
-    deployIngressWithEchoBackends(ctx, t, env, numberOfEchoBackends)
+    ingress := deployIngressWithEchoBackends(ctx, t, env, numberOfEchoBackends)
     verifyIngressWithEchoBackends(ctx, t, env, numberOfEchoBackends)
     ensureAllProxyReplicasAreConfigured(ctx, t, env, deployments.ProxyNN)
 
@@ -325,6 +326,19 @@ func TestDeployAllInOneDBLESS(t *testing.T) {
     t.Log("scale proxy to 3 replicas and wait for all instances to be ready")
     scaleDeployment(ctx, t, env, deployments.ProxyNN, 3)
     ensureAllProxyReplicasAreConfigured(ctx, t, env, deployments.ProxyNN)
+
+    t.Log("scale proxy to 1 replica")
+    scaleDeployment(ctx, t, env, deployments.ProxyNN, 1)
+
+    t.Log("misconfigure the ingress")
+    reconfigureExistingIngress(ctx, t, env, ingress, func(i *netv1.Ingress) {
+        i.Spec.Rules[0].HTTP.Paths[0].Path = badEchoPath
+    })
+
+    t.Log("scale proxy to 3 replicas and verify that the new replicas get the old good configuration")
+    scaleDeployment(ctx, t, env, deployments.ProxyNN, 3)
+    // verify that all the proxy replicas have the last good configuration
+    ensureAllProxyReplicasAreConfigured(ctx, t, env, deployments.ProxyNN)
 }
 
 func ensureAllProxyReplicasAreConfigured(ctx context.Context, t *testing.T, env environments.Environment, proxyDeploymentNN k8stypes.NamespacedName) {
diff --git a/test/e2e/helpers_test.go b/test/e2e/helpers_test.go
index c63631a6be..594198719a 100644
--- a/test/e2e/helpers_test.go
+++ b/test/e2e/helpers_test.go
@@ -84,6 +84,12 @@ const (
 
     // migrationsJobName is the name of the migrations job in postgres manifests variant.
     migrationsJobName = "kong-migrations"
+
+    // echoPath is the valid path to use for the echo service.
+    echoPath = "/echo"
+
+    // badEchoPath is an invalid path used to test ingress misconfiguration.
+    badEchoPath = "/~/echo/**"
 )
 
 // setupE2ETest builds a testing environment for the E2E test. It also sets up the environment's teardown and test
@@ -409,7 +415,7 @@ func deployIngressWithEchoBackends(ctx context.Context, t *testing.T, env enviro
     _, err = c.ConfigurationV1().KongIngresses(corev1.NamespaceDefault).Create(ctx, king, metav1.CreateOptions{})
     require.NoError(t, err)
     t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass)
-    ingress := generators.NewIngressForService("/echo", map[string]string{
+    ingress := generators.NewIngressForService(echoPath, map[string]string{
         annotations.IngressClassKey: ingressClass,
         "konghq.com/strip-path":     "true",
         "konghq.com/override":       kongIngressName,
@@ -418,6 +424,14 @@ func deployIngressWithEchoBackends(ctx context.Context, t *testing.T, env enviro
     return ingress
 }
 
+func reconfigureExistingIngress(ctx context.Context, t *testing.T, env environments.Environment, ingress *netv1.Ingress, options ...func(*netv1.Ingress)) {
+    for _, opt := range options {
+        opt(ingress)
+    }
+    _, err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Update(ctx, ingress, metav1.UpdateOptions{})
+    require.NoError(t, err)
+}
+
 //nolint:unparam
 func verifyIngressWithEchoBackends(
     ctx context.Context,