Skip to content

Commit

Permalink
E2E test fix
Browse files Browse the repository at this point in the history
  • Loading branch information
cheina97 committed May 9, 2024
1 parent f89702f commit fa56af4
Show file tree
Hide file tree
Showing 5 changed files with 61 additions and 10 deletions.
2 changes: 1 addition & 1 deletion test/e2e/pipeline/installer/liqoctl/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ do
fi
if [[ "${INFRA}" == "cluster-api" ]]; then
LIQO_PROVIDER="kubeadm"
COMMON_ARGS=("${COMMON_ARGS[@]}" --set auth.service.type=NodePort --set gateway.service.type=NodePort)
COMMON_ARGS=("${COMMON_ARGS[@]}" --set auth.service.type=NodePort --set peering.networking.gateway.server.service.type=NodePort )
else
LIQO_PROVIDER="${INFRA}"
fi
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/postinstall/basic_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ var _ = Describe("Liqo E2E", func() {
Eventually(func() bool {
readyPods, notReadyPods, err := util.ArePodsUp(ctx, cluster.NativeClient, tenantNs.Name)
klog.Infof("Tenant pods status: %d ready, %d not ready", len(readyPods), len(notReadyPods))
return err == nil && len(notReadyPods) == 0 && len(readyPods) == 1
return err == nil && len(notReadyPods) == 0 && len(readyPods) == 2
}, timeout, interval).Should(BeTrue())
}
},
Expand Down
21 changes: 18 additions & 3 deletions test/e2e/testutils/microservices/deploy_app.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,10 +90,25 @@ func CheckApplicationIsWorking(t ginkgo.GinkgoTInterface, options *k8s.KubectlOp
if len(nodes) == 0 {
return fmt.Errorf("no nodes retrieved from the cluster")
}
nodeAddress, err := getInternalAddress(nodes[0].Status.Addresses)
if err != nil {
return err

var nodeAddress string
if len(nodes) == 1 {
nodeAddress, err = getInternalAddress(nodes[0].Status.Addresses)
if err != nil {
return err
}
} else {
for i := range nodes {
if util.IsNodeControlPlane(nodes[i].Spec.Taints) {
continue
}
nodeAddress, err = getInternalAddress(nodes[i].Status.Addresses)
if err != nil {
return err
}
}
}

url := fmt.Sprintf("http://%s:%d", nodeAddress, service.Spec.Ports[0].NodePort)
return http_helper.HttpGetWithRetryWithCustomValidationE(t, url, nil, retries, sleepBetweenRetries, func(code int, body string) bool {
return code == 200
Expand Down
3 changes: 3 additions & 0 deletions test/e2e/testutils/util/exec.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,9 @@ func TriggerCheckNodeConnectivity(localNodes *v1.NodeList, command string, nodeP
return fmt.Errorf("nodePort Value invalid (Must be >= 0)")
}
for index := range localNodes.Items {
if len(localNodes.Items) != 1 && IsNodeControlPlane(localNodes.Items[index].Spec.Taints) {
continue
}
cmd := command + localNodes.Items[index].Status.Addresses[0].Address + ":" + strconv.Itoa(nodePortValue)
c := exec.Command("sh", "-c", cmd) //nolint:gosec // Just a test, no need for this check
output := &bytes.Buffer{}
Expand Down
43 changes: 38 additions & 5 deletions test/e2e/testutils/util/nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ package util
import (
"context"

v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
Expand All @@ -26,15 +26,48 @@ import (
"github.com/liqotech/liqo/pkg/consts"
)

const (
// controlPlaneTaintKey is the key of the taint applied to control-plane nodes.
controlPlaneTaintKey = "node-role.kubernetes.io/control-plane"
)

// IsNodeControlPlane reports whether the given node taints include the
// control-plane taint key, i.e. whether the node is a control-plane node.
func IsNodeControlPlane(taints []corev1.Taint) bool {
	for i := range taints {
		if taints[i].Key == controlPlaneTaintKey {
			return true
		}
	}
	return false
}

// GetNodes returns the list of nodes of the cluster matching the given labels.
func GetNodes(ctx context.Context, client kubernetes.Interface, clusterID, labelSelector string) (*v1.NodeList, error) {
func GetNodes(ctx context.Context, client kubernetes.Interface, clusterID, labelSelector string) (*corev1.NodeList, error) {
remoteNodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{
LabelSelector: labelSelector,
})
if err != nil {
klog.Errorf("%s -> an error occurred while listing nodes: %s", clusterID, err)
return nil, err
}
return remoteNodes, nil
}

// GetWorkerNodes returns the list of worker nodes of the cluster matching the
// given labels. Nodes carrying the control-plane taint are filtered out.
func GetWorkerNodes(ctx context.Context, client kubernetes.Interface, clusterID, labelSelector string) (*corev1.NodeList, error) {
	remoteNodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	if err != nil {
		klog.Errorf("%s -> an error occurred while listing nodes: %s", clusterID, err)
		return nil, err
	}
	// Keep only nodes that do not carry the control-plane taint.
	var remoteNodeWorkers corev1.NodeList
	for i := range remoteNodes.Items {
		if !IsNodeControlPlane(remoteNodes.Items[i].Spec.Taints) {
			remoteNodeWorkers.Items = append(remoteNodeWorkers.Items, remoteNodes.Items[i])
		}
	}
	// Bug fix: the previous implementation returned the unfiltered remoteNodes,
	// so the control-plane filtering above had no effect on the result.
	return &remoteNodeWorkers, nil
}

Expand All @@ -57,14 +90,14 @@ func CheckVirtualNodes(ctx context.Context, homeClusterClient kubernetes.Interfa

for index := range virtualNodes.Items {
for _, condition := range virtualNodes.Items[index].Status.Conditions {
if condition.Type == v1.NodeReady {
if condition.Status == v1.ConditionFalse {
if condition.Type == corev1.NodeReady {
if condition.Status == corev1.ConditionFalse {
klog.Infof("Virtual nodes aren't yet ready: node %d has %s=%s",
index, condition.Type, condition.Status)
return false
}
} else {
if condition.Status == v1.ConditionTrue {
if condition.Status == corev1.ConditionTrue {
klog.Infof("Virtual nodes aren't yet ready: node %d has %s=%s",
index, condition.Type, condition.Status)
return false
Expand Down

0 comments on commit fa56af4

Please sign in to comment.