
Commit

Add e2e test for session affinity after the chosen backend pod is deleted.

Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
npinaeva committed Jan 16, 2024
1 parent f94db89 commit 24f9357
Showing 1 changed file with 88 additions and 0 deletions.
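
For context, the new test enables session affinity by setting spec.sessionAffinity to ClientIP on a ClusterIP Service. Below is a minimal, illustrative sketch of such a Service object; the package name, Service name, and selector are hypothetical, and the authoritative version is the jig-based code in the diff that follows.

package svcexample

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// sessionAffinityService returns a ClusterIP Service that pins each client IP
// to a single backend, mirroring what the test below creates via the e2e jig.
func sessionAffinityService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "affinity-svc"}, // hypothetical name
		Spec: v1.ServiceSpec{
			Type:            v1.ServiceTypeClusterIP,
			SessionAffinity: v1.ServiceAffinityClientIP,
			Selector:        map[string]string{"name": "backend"}, // hypothetical selector
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt32(9376), // agnhost serve-hostname port
				Protocol:   v1.ProtocolTCP,
			}},
		},
	}
}

Deleting the backend that a client is pinned to should simply re-pin subsequent connections to one of the remaining endpoints, which is what the added test asserts.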
88 changes: 88 additions & 0 deletions test/e2e/service.go
@@ -27,8 +27,11 @@ import (
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
testutils "k8s.io/kubernetes/test/utils"
)

const (
@@ -119,6 +122,69 @@ var _ = ginkgo.Describe("Services", func() {
framework.ExpectNoError(err)
})

ginkgo.It("Creates a service with session-affinity, and ensures it works after backend deletion", func() {
namespace := f.Namespace.Name
servicePort := 80
jig := e2eservice.NewTestJig(cs, namespace, serviceName)

ginkgo.By("Creating a session-affinity service")
var createdPods []*v1.Pod
maxContainerFailures := 0
replicas := 3
config := testutils.RCConfig{
Client: cs,
Image: framework.ServeHostnameImage,
Command: []string{"/agnhost", "serve-hostname"},
Name: "backend",
Labels: jig.Labels,
Namespace: namespace,
PollInterval: 3 * time.Second,
Timeout: framework.PodReadyBeforeTimeout,
Replicas: replicas,
CreatedPods: &createdPods,
MaxContainerFailures: &maxContainerFailures,
}
err := e2erc.RunRC(context.TODO(), config)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(len(createdPods)).To(gomega.Equal(replicas), fmt.Sprintf("incorrect number of running pods: %v", len(createdPods)))

svc, err := jig.CreateTCPService(context.TODO(), func(s *v1.Service) {
s.Spec.SessionAffinity = v1.ServiceAffinityClientIP
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports = []v1.ServicePort{{
Port: int32(servicePort),
// agnhost serve-hostname port
TargetPort: intstr.FromInt32(9376),
Protocol: v1.ProtocolTCP,
}}
})
framework.ExpectNoError(err)

execPod := e2epod.CreateExecPodOrFail(context.TODO(), cs, namespace, "execpod-affinity", nil)
err = jig.CheckServiceReachability(context.TODO(), svc, execPod)
framework.ExpectNoError(err)

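// ensureStickySession sends a batch of requests to the service from the exec pod,
// verifies every reply comes from the same backend, and returns that backend pod's name.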
ensureStickySession := func() string {
hosts := getServiceBackendsFromPod(execPod, svc.Spec.ClusterIP, int(svc.Spec.Ports[0].Port))
uniqHosts := sets.New[string](hosts...)
gomega.Expect(uniqHosts.Len()).To(gomega.Equal(1), fmt.Sprintf("expected the same backend for every connection with session-affinity set, got %v", uniqHosts))
backendPod, _ := uniqHosts.PopAny()
return backendPod
}

ginkgo.By("check sessions affinity from a client pod")
backendPod := ensureStickySession()

ginkgo.By(fmt.Sprintf("delete chosen backend pod %v", backendPod))
err = e2epod.NewPodClient(f).Delete(context.TODO(), backendPod, metav1.DeleteOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodNotFoundInNamespace(context.TODO(), cs, backendPod, namespace, 60*time.Second)
framework.ExpectNoError(err)

ginkgo.By("check sessions affinity from a client pod again")
ensureStickySession()
})

// The below series of tests queries nodePort services with hostNetwork:true and hostNetwork:false pods as endpoints,
// for both HTTP and UDP and different ingress and egress payload sizes.
// Steps:
@@ -722,6 +788,28 @@ var _ = ginkgo.Describe("Services", func() {
})
})

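// getServiceBackendsFromPod curls serviceIP:servicePort connectionAttempts times from execPod
// and returns the backend hostname reported by each successful reply.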
func getServiceBackendsFromPod(execPod *v1.Pod, serviceIP string, servicePort int) []string {
connectionAttempts := 15
serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
cmd := fmt.Sprintf("for i in $(seq 1 %d); do echo; %s ; done", connectionAttempts, curl)

stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort)
return nil
}
hosts := strings.Split(stdout, "\n")
nonEmptyHosts := []string{}
for _, host := range hosts {
if len(host) > 0 {
nonEmptyHosts = append(nonEmptyHosts, strings.TrimSpace(host))
}
}
gomega.Expect(len(nonEmptyHosts)).To(gomega.Equal(connectionAttempts), fmt.Sprintf("Expected %v replies, got %v", connectionAttempts, nonEmptyHosts))
return nonEmptyHosts
}

// This test ensures that - when a pod that's a backend for a service curls the
// service ip; if the traffic was DNAT-ed to the same src pod (hairpin/loopback case) -
// the srcIP of reply traffic is SNATed to the special masquerade IP 169.254.169.5
