Skip to content

Commit

Permalink
fix network e2e tests
Browse files Browse the repository at this point in the history
  • Loading branch information
alacuku committed Jul 20, 2021
1 parent 338387b commit 91908c3
Show file tree
Hide file tree
Showing 7 changed files with 29 additions and 16 deletions.
9 changes: 5 additions & 4 deletions internal/liqonet/route-operator/symmetricRoutingOperator.go
Expand Up @@ -150,10 +150,11 @@ func (src *SymmetricRoutingController) podFilter(obj client.Object) bool {
return false
}
// If podIP is not set return false.
// Here the newly created pods scheduled on a virtual node will be skipped. The filtered cache for all the pods
// scheduled on a virtual node works only when the correct label has been added to the pod. When pods are created
// the label is not present, but we are sure that it will be added before the IP address for the same pod is set.
//Once the pods have been labeled the api server should not inform the controller about them.
// Here the newly created pods scheduled on a virtual node will be skipped. It is a temporary situation until
// the pods are labeled. The filtered cache for all the pods scheduled on a virtual node works only when the correct
// label has been added to the pod. When pods are created the label is not present, but we are sure that it will be
// added before the IP address for the same pod is set.
// Once the pods have been labeled they are filtered out at the cache level by the client.
if p.Status.PodIP == "" {
klog.V(infoLogLevel).Infof("skipping pod {%s} running on node {%s} has ip address set to empty", p.Name, p.Spec.NodeName)
return false
Expand Down
2 changes: 2 additions & 0 deletions pkg/virtualKubelet/provider/pods.go
Expand Up @@ -58,6 +58,8 @@ func (p *LiqoProvider) CreatePod(ctx context.Context, homePod *corev1.Pod) error

// Add a finalizer to allow the pod to be garbage collected by the incoming replicaset reflector.
// Add label to distinct the offloaded pods from the local ones.
// The merge strategy is types.StrategicMergePatchType in order to merge the previous state
// with the new configuration.
homePodPatch := []byte(fmt.Sprintf(
`{"metadata":{"labels":{"%s":"%s"},"finalizers":["%s"]}}`,
liqoconst.LocalPodLabelKey, liqoconst.LocalPodLabelValue, virtualKubelet.HomePodFinalizer))
Expand Down
7 changes: 5 additions & 2 deletions test/e2e/peering_e2e/basic_test.go
Expand Up @@ -56,7 +56,9 @@ var _ = Describe("Liqo E2E", func() {
Entry("VirtualNode is Ready on cluster 2", testContext.Clusters[0], namespace),
Entry("VirtualNode is Ready on cluster 1", testContext.Clusters[1], namespace),
)
})

Context("E2E network testing with pods and services", func() {
DescribeTable("Liqo Pod to Pod Connectivity Check",
func(homeCluster, foreignCluster tester.ClusterContext, namespace string) {
By("Deploy Tester Pod", func() {
Expand All @@ -75,8 +77,9 @@ var _ = Describe("Liqo E2E", func() {
})

By("Check Service NodePort Connectivity", func() {
err := net.ConnectivityCheckNodeToPod(ctx, homeCluster.Client, homeCluster.ClusterID)
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return net.ConnectivityCheckNodeToPod(ctx, homeCluster.Client, homeCluster.ClusterID)
}, timeout, interval).ShouldNot(HaveOccurred())
})
},
Entry("Check Pod to Pod connectivity from cluster 1", testContext.Clusters[0], testContext.Clusters[1], namespace),
Expand Down
1 change: 1 addition & 0 deletions test/e2e/testutils/net/net.go
Expand Up @@ -64,6 +64,7 @@ func CheckPodConnectivity(ctx context.Context, homeConfig *restclient.Config, ho
return err
}
cmd := command + podRemoteUpdateCluster1.Status.PodIP
klog.Infof("running command %s", cmd)
stdout, stderr, err := util.ExecCmd(homeConfig, homeClient, podLocalUpdate.Name, podLocalUpdate.Namespace, cmd)
if stdout == "200" && err == nil {
return nil
Expand Down
8 changes: 0 additions & 8 deletions test/e2e/testutils/net/svc.go
Expand Up @@ -40,14 +40,6 @@ func EnsureNodePort(ctx context.Context, client kubernetes.Interface, clusterID,
klog.Error(err)
return nil, err
}
clusterIP := nodePort.Spec.ClusterIP
nodePort.Spec = serviceSpec
nodePort.Spec.ClusterIP = clusterIP
_, err = client.CoreV1().Services(namespace).Update(ctx, nodePort, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("%s -> an error occurred while updating nodePort service %s : %s", clusterID, name, err)
return nil, err
}
}
if err != nil {
klog.Errorf("%s -> an error occurred while creating nodePort service %s in namespace %s: %s", clusterID, name, namespace, err)
Expand Down
3 changes: 2 additions & 1 deletion test/e2e/testutils/util/exec.go
Expand Up @@ -61,9 +61,10 @@ func TriggerCheckNodeConnectivity(localNodes *v1.NodeList, command string, nodeP
klog.Infof("running command %s", cmd)
err := c.Run()
if err != nil {
klog.Error(err)
klog.Info(output.String())
klog.Info(errput.String())
return nil
return err
}
}
return nil
Expand Down
15 changes: 14 additions & 1 deletion test/e2e/testutils/util/pod.go
Expand Up @@ -21,20 +21,33 @@ func IsPodUp(ctx context.Context, client kubernetes.Interface, namespace, podNam
virtualKubelet.ReflectedpodKey: podName,
}
if isHomePod {
klog.Infof("checking if local pod %s/%s is ready", namespace, podName)
podToCheck, err = client.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
klog.Errorf("an error occurred while getting pod %s/%s: %v", namespace, podName, err)
return false
}
} else {
klog.Infof("checking if remote pod is ready")
pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelSelector).String(),
})
if err != nil || len(pods.Items) == 0 {
if err != nil {
klog.Errorf("an error occurred while getting remote pod: %v", err)
return false
}
if len(pods.Items) == 0 {
klog.Error("an error occurred: remote pod not found")
return false
}
podToCheck = &pods.Items[0]
}
state := pod.IsPodReady(podToCheck)
if isHomePod {
klog.Infof("local pod %s/%s is ready", podToCheck.Namespace, podToCheck.Name)
} else {
klog.Infof("remote pod %s/%s is ready", podToCheck.Namespace, podToCheck.Name)
}
return state
}

Expand Down

0 comments on commit 91908c3

Please sign in to comment.