From 91908c3b8c9f495f95282bcc9e0caa6ffd50be48 Mon Sep 17 00:00:00 2001
From: alacuku
Date: Fri, 16 Jul 2021 18:26:59 +0200
Subject: [PATCH] fix network e2e tests

---
 .../route-operator/symmetricRoutingOperator.go |  9 +++++----
 pkg/virtualKubelet/provider/pods.go            |  2 ++
 test/e2e/peering_e2e/basic_test.go             |  7 +++++--
 test/e2e/testutils/net/net.go                  |  1 +
 test/e2e/testutils/net/svc.go                  |  8 --------
 test/e2e/testutils/util/exec.go                |  3 ++-
 test/e2e/testutils/util/pod.go                 | 15 ++++++++++++++-
 7 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/internal/liqonet/route-operator/symmetricRoutingOperator.go b/internal/liqonet/route-operator/symmetricRoutingOperator.go
index 1cb7c8790c..22d3726fe1 100644
--- a/internal/liqonet/route-operator/symmetricRoutingOperator.go
+++ b/internal/liqonet/route-operator/symmetricRoutingOperator.go
@@ -150,10 +150,11 @@ func (src *SymmetricRoutingController) podFilter(obj client.Object) bool {
 		return false
 	}
 	// If podIP is not set return false.
-	// Here the newly created pods scheduled on a virtual node will be skipped. The filtered cache for all the pods
-	// scheduled on a virtual node works only when the correct label has been added to the pod. When pods are created
-	// the label is not present, but we are sure that it will be added before the IP address for the same pod is set.
-	//Once the pods have been labeled the api server should not inform the controller about them.
+	// Here the newly created pods scheduled on a virtual node will be skipped. It is a temporary situation until
+	// the pods are labeled. The filtered cache for all the pods scheduled on a virtual node works only when the correct
+	// label has been added to the pod. When pods are created the label is not present, but we are sure that it will be
+	// added before the IP address for the same pod is set.
+	// Once the pods have been labeled they are filtered out at the cache level by the client.
 	if p.Status.PodIP == "" {
 		klog.V(infoLogLevel).Infof("skipping pod {%s} running on node {%s} has ip address set to empty", p.Name, p.Spec.NodeName)
 		return false
diff --git a/pkg/virtualKubelet/provider/pods.go b/pkg/virtualKubelet/provider/pods.go
index 8a051bc194..da31cb8262 100644
--- a/pkg/virtualKubelet/provider/pods.go
+++ b/pkg/virtualKubelet/provider/pods.go
@@ -58,6 +58,8 @@ func (p *LiqoProvider) CreatePod(ctx context.Context, homePod *corev1.Pod) error
 
 	// Add a finalizer to allow the pod to be garbage collected by the incoming replicaset reflector.
 	// Add label to distinct the offloaded pods from the local ones.
+	// The merge strategy is types.StrategicMergePatchType in order to merge the previous state
+	// with the new configuration.
 	homePodPatch := []byte(fmt.Sprintf(
 		`{"metadata":{"labels":{"%s":"%s"},"finalizers":["%s"]}}`,
 		liqoconst.LocalPodLabelKey, liqoconst.LocalPodLabelValue, virtualKubelet.HomePodFinalizer))
diff --git a/test/e2e/peering_e2e/basic_test.go b/test/e2e/peering_e2e/basic_test.go
index 10f8ec9089..70035c2edf 100644
--- a/test/e2e/peering_e2e/basic_test.go
+++ b/test/e2e/peering_e2e/basic_test.go
@@ -56,7 +56,9 @@ var _ = Describe("Liqo E2E", func() {
 			Entry("VirtualNode is Ready on cluster 2", testContext.Clusters[0], namespace),
 			Entry("VirtualNode is Ready on cluster 1", testContext.Clusters[1], namespace),
 		)
+	})
+	Context("E2E network testing with pods and services", func() {
 
 		DescribeTable("Liqo Pod to Pod Connectivity Check",
 			func(homeCluster, foreignCluster tester.ClusterContext, namespace string) {
 				By("Deploy Tester Pod", func() {
@@ -75,8 +77,9 @@ var _ = Describe("Liqo E2E", func() {
 				})
 
 				By("Check Service NodePort Connectivity", func() {
-					err := net.ConnectivityCheckNodeToPod(ctx, homeCluster.Client, homeCluster.ClusterID)
-					Expect(err).ToNot(HaveOccurred())
+					Eventually(func() error {
+						return net.ConnectivityCheckNodeToPod(ctx, homeCluster.Client, homeCluster.ClusterID)
+					}, timeout, interval).ShouldNot(HaveOccurred())
 				})
 			},
 			Entry("Check Pod to Pod connectivity from cluster 1", testContext.Clusters[0], testContext.Clusters[1], namespace),
diff --git a/test/e2e/testutils/net/net.go b/test/e2e/testutils/net/net.go
index 2b4c8c6561..4a8a745fcc 100644
--- a/test/e2e/testutils/net/net.go
+++ b/test/e2e/testutils/net/net.go
@@ -64,6 +64,7 @@ func CheckPodConnectivity(ctx context.Context, homeConfig *restclient.Config, ho
 		return err
 	}
 	cmd := command + podRemoteUpdateCluster1.Status.PodIP
+	klog.Infof("running command %s", cmd)
 	stdout, stderr, err := util.ExecCmd(homeConfig, homeClient, podLocalUpdate.Name, podLocalUpdate.Namespace, cmd)
 	if stdout == "200" && err == nil {
 		return nil
diff --git a/test/e2e/testutils/net/svc.go b/test/e2e/testutils/net/svc.go
index 645eb7e869..69ad2d34e5 100644
--- a/test/e2e/testutils/net/svc.go
+++ b/test/e2e/testutils/net/svc.go
@@ -40,14 +40,6 @@ func EnsureNodePort(ctx context.Context, client kubernetes.Interface, clusterID,
 			klog.Error(err)
 			return nil, err
 		}
-		clusterIP := nodePort.Spec.ClusterIP
-		nodePort.Spec = serviceSpec
-		nodePort.Spec.ClusterIP = clusterIP
-		_, err = client.CoreV1().Services(namespace).Update(ctx, nodePort, metav1.UpdateOptions{})
-		if err != nil {
-			klog.Errorf("%s -> an error occurred while updating nodePort service %s : %s", clusterID, name, err)
-			return nil, err
-		}
 	}
 	if err != nil {
 		klog.Errorf("%s -> an error occurred while creating nodePort service %s in namespace %s: %s", clusterID, name, namespace, err)
diff --git a/test/e2e/testutils/util/exec.go b/test/e2e/testutils/util/exec.go
index 81179cdd3b..754e766944 100644
--- a/test/e2e/testutils/util/exec.go
+++ b/test/e2e/testutils/util/exec.go
@@ -61,9 +61,10 @@ func TriggerCheckNodeConnectivity(localNodes *v1.NodeList, command string, nodeP
 		klog.Infof("running command %s", cmd)
 		err := c.Run()
 		if err != nil {
+			klog.Error(err)
 			klog.Info(output.String())
 			klog.Info(errput.String())
-			return nil
+			return err
 		}
 	}
 	return nil
diff --git a/test/e2e/testutils/util/pod.go b/test/e2e/testutils/util/pod.go
index f51cce0a5c..333b75cfdb 100644
--- a/test/e2e/testutils/util/pod.go
+++ b/test/e2e/testutils/util/pod.go
@@ -21,20 +21,33 @@ func IsPodUp(ctx context.Context, client kubernetes.Interface, namespace, podNam
 		virtualKubelet.ReflectedpodKey: podName,
 	}
 	if isHomePod {
+		klog.Infof("checking if local pod %s/%s is ready", namespace, podName)
 		podToCheck, err = client.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
+			klog.Errorf("an error occurred while getting pod %s/%s: %v", namespace, podName, err)
 			return false
 		}
 	} else {
+		klog.Infof("checking if remote pod is ready")
 		pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
 			LabelSelector: labels.SelectorFromSet(labelSelector).String(),
 		})
-		if err != nil || len(pods.Items) == 0 {
+		if err != nil {
+			klog.Errorf("an error occurred while getting remote pod: %v", err)
+			return false
+		}
+		if len(pods.Items) == 0 {
+			klog.Error("an error occurred: remote pod not found")
 			return false
 		}
 		podToCheck = &pods.Items[0]
 	}
 
 	state := pod.IsPodReady(podToCheck)
+	if isHomePod {
+		klog.Infof("local pod %s/%s is ready", podToCheck.Namespace, podToCheck.Name)
+	} else {
+		klog.Infof("remote pod %s/%s is ready", podToCheck.Namespace, podToCheck.Name)
+	}
 	return state
 }