OCPBUGS-26498: Add test for UpgradeValidation contention
Add "The HAProxy router converges when multiple routers are
writing conflicting upgrade validation status" test which validates
router converge when writing conflicting status in a scenario that uses
multiple conditions.

Previously, we tested conflicting status fields (hostname), but did not
have a test for conflicting status conditions. This test adds logic
that exercises the new logic in the router's Upgrade Validation plugin.
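For context, here is a minimal sketch of the contention this test provokes, assuming the RouteUnservableInFutureVersions condition type from github.com/openshift/api/route/v1 (the condition the router's Upgrade Validation plugin manages); the helper names are illustrative and not part of the commit:

package main

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
	corev1 "k8s.io/api/core/v1"
)

// forceAddCondition mimics what --upgrade-validation-force-add-condition
// makes a router do: ensure the condition is present on its ingress entry.
func forceAddCondition(ingress *routev1.RouteIngress) {
	for _, c := range ingress.Conditions {
		if c.Type == routev1.RouteUnservableInFutureVersions {
			return // already present, nothing to write
		}
	}
	ingress.Conditions = append(ingress.Conditions, routev1.RouteIngressCondition{
		Type:   routev1.RouteUnservableInFutureVersions,
		Status: corev1.ConditionFalse, // status value is illustrative
	})
}

// forceRemoveCondition mimics --upgrade-validation-force-remove-condition:
// strip the condition, undoing the other router group's write.
func forceRemoveCondition(ingress *routev1.RouteIngress) {
	kept := ingress.Conditions[:0]
	for _, c := range ingress.Conditions {
		if c.Type != routev1.RouteUnservableInFutureVersions {
			kept = append(kept, c)
		}
	}
	ingress.Conditions = kept
}

func main() {
	// Both groups run with --name=conflicting, so they update the same entry.
	ingress := &routev1.RouteIngress{RouterName: "conflicting"}
	forceAddCondition(ingress)
	forceRemoveCondition(ingress)
	fmt.Printf("conditions after one add/remove cycle: %d\n", len(ingress.Conditions))
}

One router group persistently adds the condition while the other strips it, so each status write invalidates the other group's last write until the router's contention tracker tells both sides to back off.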
gcs278 committed Apr 16, 2024
1 parent 7379790 commit e03927e
Showing 2 changed files with 160 additions and 4 deletions. The second file is generated and is not rendered by default.
test/extended/router/stress.go: 162 changes (158 additions & 4 deletions)
@@ -82,6 +82,7 @@ var _ = g.Describe("[sig-network][Feature:Router][apigroup:route.openshift.io]",
rs, err := oc.KubeClient().AppsV1().ReplicaSets(ns).Create(
context.Background(),
scaledRouter(
"router",
routerImage,
[]string{
"-v=4",
@@ -166,6 +167,7 @@ var _ = g.Describe("[sig-network][Feature:Router][apigroup:route.openshift.io]",
rs, err := oc.KubeClient().AppsV1().ReplicaSets(ns).Create(
context.Background(),
scaledRouter(
"router",
routerImage,
[]string{
"-v=4",
@@ -295,6 +297,158 @@ var _ = g.Describe("[sig-network][Feature:Router][apigroup:route.openshift.io]",
o.Expect(writes).To(o.BeNumerically("<", 5))
}()
})

g.It("converges when multiple routers are writing conflicting upgrade validation status", func() {
g.By("deploying a scaled out namespace scoped router")

rsAdd, err := oc.KubeClient().AppsV1().ReplicaSets(ns).Create(
context.Background(),
scaledRouter(
"router-add-condition",
"quay.io/gspence/router:upgrade-validation-e2e",
//routerImage,
[]string{
"-v=5",
fmt.Sprintf("--namespace=%s", ns),
// the contention tracker is resync / 10, so this will give us 2 minutes of contention tracking
"--resync-interval=20m",
"--name=conflicting",
"--upgrade-validation-force-add-condition",
},
),
metav1.CreateOptions{},
)
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForReadyReplicaSet(oc.KubeClient(), ns, rsAdd.Name)
o.Expect(err).NotTo(o.HaveOccurred())

rsRemove, err := oc.KubeClient().AppsV1().ReplicaSets(ns).Create(
context.Background(),
scaledRouter(
"router-remove-condition",
"quay.io/gspence/router:upgrade-validation-e2e",
[]string{
"-v=5",
fmt.Sprintf("--namespace=%s", ns),
// the contention tracker is resync / 10, so this will give us 2 minutes of contention tracking
"--resync-interval=20m",
"--name=conflicting",
"--upgrade-validation-force-remove-condition",
},
),
metav1.CreateOptions{},
)
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForReadyReplicaSet(oc.KubeClient(), ns, rsRemove.Name)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("creating multiple routes")
client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(ns)
var rv string
for i := 0; i < 20; i++ {
_, err := client.Create(context.Background(), &routev1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", i),
},
Spec: routev1.RouteSpec{
To: routev1.RouteTargetReference{Name: "test"},
Port: &routev1.RoutePort{
TargetPort: intstr.FromInt(8080),
},
},
}, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}

g.By("waiting for sufficient routes to have a status")
err = wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
routes, err := client.List(context.Background(), metav1.ListOptions{})
if err != nil {
return false, err
}
o.Expect(routes.Items).To(o.HaveLen(20))
other := 0
conflicting := 0
for _, route := range routes.Items {
ingress := findIngress(&route, "conflicting")
if ingress == nil {
if len(route.Status.Ingress) > 0 {
other++
}
continue
}
if len(route.Status.Ingress) > 1 {
other++
}
conflicting++
o.Expect(ingress.Host).NotTo(o.BeEmpty())
o.Expect(ingress.Conditions).NotTo(o.BeEmpty())
}
// if other routers are writing status, wait until we get a complete
// set since we don't have a way to tell other routers to ignore us
if conflicting < 3 && other%20 != 0 {
return false, nil
}
outputIngress(routes.Items...)
rv = routes.ResourceVersion
return true, nil
})
o.Expect(err).NotTo(o.HaveOccurred())

g.By("verifying that we stop writing conflicts rapidly")
writes := 0
w, err := client.Watch(context.Background(), metav1.ListOptions{Watch: true, ResourceVersion: rv})
o.Expect(err).NotTo(o.HaveOccurred())
func() {
defer w.Stop()
timer := time.NewTimer(15 * time.Second)
ch := w.ResultChan()
Wait:
for i := 0; ; i++ {
select {
case _, ok := <-ch:
writes++
o.Expect(ok).To(o.BeTrue())
case <-timer.C:
break Wait
}
}
e2e.Logf("wrote %d times", writes)
// we expect to see no more than 10 writes per router (we should hit the hard limit) (two ReplicaSets of 3 replicas each)
o.Expect(writes).To(o.BeNumerically("<=", 50))
}()

g.By("clearing a single route's status")
route, err := client.Patch(context.Background(), "9", types.MergePatchType, []byte(`{"status":{"ingress":[]}}`), metav1.PatchOptions{}, "status")
o.Expect(err).NotTo(o.HaveOccurred())

g.By("verifying that only get a few updates")
writes = 0
w, err = client.Watch(context.Background(), metav1.ListOptions{Watch: true, ResourceVersion: route.ResourceVersion})
o.Expect(err).NotTo(o.HaveOccurred())
func() {
defer w.Stop()
timer := time.NewTimer(10 * time.Second)
ch := w.ResultChan()
Wait:
for i := 0; ; i++ {
select {
case obj, ok := <-ch:
o.Expect(ok).To(o.BeTrue())
if r, ok := obj.Object.(*routev1.Route); ok {
if r == nil || r.Name != "9" {
continue
}
}
writes++
case <-timer.C:
break Wait
}
}
e2e.Logf("wrote %d times", writes)
o.Expect(writes).To(o.BeNumerically("<", 5))
}()
})
})
})

@@ -307,21 +461,21 @@ func findIngress(route *routev1.Route, name string) *routev1.RouteIngress {
return nil
}

-func scaledRouter(image string, args []string) *appsv1.ReplicaSet {
+func scaledRouter(name, image string, args []string) *appsv1.ReplicaSet {
one := int64(1)
scale := int32(3)
return &appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "router",
Name: name,
},
Spec: appsv1.ReplicaSetSpec{
Replicas: &scale,
Selector: &metav1.LabelSelector{
-MatchLabels: map[string]string{"app": "router"},
+MatchLabels: map[string]string{"app": name},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
-Labels: map[string]string{"app": "router"},
+Labels: map[string]string{"app": name},
},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: &one,
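The hunk above shows only the tail of findIngress. For reference, a plausible body consistent with its signature and the visible return nil (a sketch; the commit does not modify this function):

// Sketch only: returns the status entry written by the named router, or
// nil if that router has not written status for this route yet.
// Assumes routev1 = github.com/openshift/api/route/v1.
func findIngress(route *routev1.Route, name string) *routev1.RouteIngress {
	for i := range route.Status.Ingress {
		if route.Status.Ingress[i].RouterName == name {
			return &route.Status.Ingress[i]
		}
	}
	return nil
}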

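waitForReadyReplicaSet is called after each ReplicaSet is created but is defined elsewhere in the package. A minimal sketch of what such a helper might look like, assuming client-go's kubernetes.Interface and apimachinery's wait.Poll:

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// Sketch of the ready-check helper the test calls; the real
// waitForReadyReplicaSet lives elsewhere in test/extended/router.
func waitForReadyReplicaSet(client kubernetes.Interface, ns, name string) error {
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		rs, err := client.AppsV1().ReplicaSets(ns).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// ready once the controller has observed the latest spec and all
		// desired replicas report ready
		return rs.Status.ObservedGeneration >= rs.Generation &&
			rs.Spec.Replicas != nil && rs.Status.ReadyReplicas == *rs.Spec.Replicas, nil
	})
}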

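Similarly, outputIngress, used to log the converged state once polling succeeds, is defined elsewhere in the package; a hedged sketch, assuming e2e = k8s.io/kubernetes/test/e2e/framework:

// Sketch of outputIngress: logs each route's ingress status for debugging.
func outputIngress(routes ...routev1.Route) {
	for _, route := range routes {
		for _, ingress := range route.Status.Ingress {
			e2e.Logf("route %q: router=%q host=%q conditions=%d",
				route.Name, ingress.RouterName, ingress.Host, len(ingress.Conditions))
		}
	}
}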