Skip to content

Commit

Permalink
fix e2e
Browse files Browse the repository at this point in the history
  • Loading branch information
czeslavo committed Jul 18, 2023
1 parent ce4964c commit e086e3e
Show file tree
Hide file tree
Showing 4 changed files with 57 additions and 25 deletions.
41 changes: 26 additions & 15 deletions internal/clients/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -228,27 +228,33 @@ func (c *AdminAPIClientsManager) gatewayClientsReconciliationLoop() {
// It will adjust lists of gateway clients and notify subscribers about the change if readyGatewayClients list has
// changed.
func (c *AdminAPIClientsManager) onDiscoveredAdminAPIsNotification(discoveredAdminAPIs []adminapi.DiscoveredAdminAPI) {
c.lock.Lock()
defer c.lock.Unlock()

c.logger.Debug("received notification about Admin API addresses change")
if clientsChanged := c.adjustGatewayClients(discoveredAdminAPIs); clientsChanged {
// Notify subscribers that the clients list has been updated.
c.logger.Debug("notifying subscribers about gateway clients change")

changed := func() bool {
c.lock.Lock()
defer c.lock.Unlock()
clientsChanged := c.adjustGatewayClients(discoveredAdminAPIs)
readinessChanged := c.reconcileGatewayClientsReadiness()
return clientsChanged || readinessChanged
}()

if changed {
c.notifyGatewayClientsSubscribers()
}
}

// onReadinessReconciliationTick is called on every readinessReconciliationTicker tick. It will reconcile readiness
// of all gateway clients and notify subscribers about the change if readyGatewayClients list has changed.
func (c *AdminAPIClientsManager) onReadinessReconciliationTick() {
c.lock.Lock()
defer c.lock.Unlock()

c.logger.Debug("reconciling readiness of gateway clients")
if clientsChanged := c.reconcileGatewayClientsReadiness(); clientsChanged {
// Notify subscribers that the clients list has been updated.
c.logger.Debug("notifying subscribers about gateway clients change")

changed := func() bool {
c.lock.Lock()
defer c.lock.Unlock()
return c.reconcileGatewayClientsReadiness()
}()

if changed {
c.notifyGatewayClientsSubscribers()
}
}
Expand All @@ -258,7 +264,7 @@ func (c *AdminAPIClientsManager) onReadinessReconciliationTick() {
// of the discovered Admin APIs and creates only those clients that we don't have.
// It returns true if the gatewayClients slice has been changed, false otherwise.
func (c *AdminAPIClientsManager) adjustGatewayClients(discoveredAdminAPIs []adminapi.DiscoveredAdminAPI) (changed bool) {
// Short circuit,
// Short circuit.
if len(discoveredAdminAPIs) == 0 {
// If we have no clients and the provided list is empty, it means we're in sync. No change was made.
if len(c.readyGatewayClients) == 0 && len(c.pendingGatewayClients) == 0 {
Expand Down Expand Up @@ -299,8 +305,7 @@ func (c *AdminAPIClientsManager) adjustGatewayClients(discoveredAdminAPIs []admi
}
}

readinessChanged := c.reconcileGatewayClientsReadiness()
return changed || readinessChanged
return changed
}

// reconcileGatewayClientsReadiness reconciles the readiness of the gateway clients. It ensures that the clients on the
Expand All @@ -313,6 +318,11 @@ func (c *AdminAPIClientsManager) reconcileGatewayClientsReadiness() bool {
// It's to ensure that the readiness is not reconciled too often when we receive a lot of notifications.
defer c.readinessReconciliationTicker.Reset(DefaultReadinessReconciliationInterval)

// Short circuit.
if len(c.readyGatewayClients) == 0 && len(c.pendingGatewayClients) == 0 {
return false
}

readinessCheckResult := c.readinessChecker.CheckReadiness(
c.ctx,
lo.MapToSlice(c.readyGatewayClients, func(_ string, cl *adminapi.Client) AlreadyCreatedClient { return cl }),
Expand All @@ -333,6 +343,7 @@ func (c *AdminAPIClientsManager) reconcileGatewayClientsReadiness() bool {

// notifyGatewayClientsSubscribers sends notifications to all subscribers that have called SubscribeToGatewayClientsChanges.
func (c *AdminAPIClientsManager) notifyGatewayClientsSubscribers() {
c.logger.Debug("notifying subscribers about gateway clients change")
for _, sub := range c.gatewayClientsChangesSubscribers {
select {
case <-c.ctx.Done():
Expand Down
27 changes: 26 additions & 1 deletion test/e2e/helpers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,7 @@ func waitForDeploymentRollout(ctx context.Context, t *testing.T, env environment
}

if err := allUpdatedReplicasRolledOutAndReady(deployment); err != nil {
t.Logf("controller deployment not ready: %s", err)
t.Logf("%s/%s deployment not ready: %s", namespace, name, err)
return false
}

Expand Down Expand Up @@ -900,3 +900,28 @@ func scaleDeployment(ctx context.Context, t *testing.T, env environments.Environ
return deployment.Status.ReadyReplicas == replicas
}, time.Minute*3, time.Second, "deployment %s did not scale to %d replicas", deployment.Name, replicas)
}

// Restart forces a restart of both the controller and the proxy deployments by deleting all of their pods
// (selected by the "app" label matching each deployment's name) and waiting for the deployments to roll out
// again. It is a substitute for `kubectl rollout restart`, which has no direct client-go equivalent.
func (d Deployments) Restart(ctx context.Context, t *testing.T, env environments.Environment) {
	t.Helper()

	err := env.Cluster().Client().CoreV1().Pods(d.ControllerNN.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
		LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{
			MatchLabels: map[string]string{
				"app": d.ControllerNN.Name,
			},
		}),
	})
	require.NoError(t, err, "failed to delete controller pods")

	// NOTE(review): the original used d.ControllerNN.Namespace here while the rollout wait below uses
	// d.ProxyNN.Namespace — use the proxy's own namespace so the selector targets the right pods even if
	// the two deployments ever live in different namespaces.
	err = env.Cluster().Client().CoreV1().Pods(d.ProxyNN.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
		LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{
			MatchLabels: map[string]string{
				"app": d.ProxyNN.Name,
			},
		}),
	})
	require.NoError(t, err, "failed to delete proxy pods")

	waitForDeploymentRollout(ctx, t, env, d.ControllerNN.Namespace, d.ControllerNN.Name)
	waitForDeploymentRollout(ctx, t, env, d.ProxyNN.Namespace, d.ProxyNN.Name)
}
9 changes: 2 additions & 7 deletions test/e2e/kuma_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,8 @@ func TestDeployAllInOneDBLESSKuma(t *testing.T) {
require.NoError(t, kuma.EnableMeshForNamespace(ctx, env.Cluster(), "kong"))
require.NoError(t, kuma.EnableMeshForNamespace(ctx, env.Cluster(), "default"))

// scale to force a restart of pods and trigger mesh injection (we can't annotate the Kong namespace in advance,
// it gets clobbered by deployKong()). is there a "rollout restart" in client-go? who knows!
scaleDeployment(ctx, t, env, deployments.ProxyNN, 0)
scaleDeployment(ctx, t, env, deployments.ControllerNN, 0)

scaleDeployment(ctx, t, env, deployments.ProxyNN, 2)
scaleDeployment(ctx, t, env, deployments.ControllerNN, 2)
// Restart Kong pods to trigger mesh injection.
deployments.Restart(ctx, t, env)

t.Log("running ingress tests to verify all-in-one deployed ingress controller and proxy are functional")
deployIngressWithEchoBackends(ctx, t, env, numberOfEchoBackends)
Expand Down
5 changes: 3 additions & 2 deletions test/e2e/upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,9 @@ func testManifestsUpgrade(

t.Logf("deploying target version of kong manifests: %s", testParams.toManifestPath)
deployments := ManifestDeploy{
Path: testParams.toManifestPath,
SkipTestPatches: true,
Path: testParams.toManifestPath,
// Do not skip test patches - we want to verify that upgrade works with an image override in target manifest.
SkipTestPatches: false,
}.Run(ctx, t, env)

if featureGates := testParams.controllerFeatureGates; featureGates != "" {
Expand Down

0 comments on commit e086e3e

Please sign in to comment.