From f52b15091d21a18a0e1928c55788f035f206edf7 Mon Sep 17 00:00:00 2001 From: shane Date: Fri, 16 Jun 2023 10:43:29 +0800 Subject: [PATCH] e2e: add qos policy test cases (#2924) * e2e: add qos policy test cases Set qos policy for natgw Set qos policy for eip Rebuild qos of natgw when the natgw pod restarts Rebuild qos of eip when the natgw pod restarts Change qos policy of natgw Change qos policy of eip Update qos policy of eip Set specific ip qos policy of natgw Match Qos priority Create natgw with qos policy Create eip with qos policy * Make e2e run faster --- pkg/controller/qos_policy.go | 6 +- test/e2e/framework/docker/network.go | 9 + test/e2e/framework/exec_utils.go | 34 + test/e2e/framework/iptables-eip.go | 27 +- test/e2e/framework/qos-policy.go | 290 ++++++++ test/e2e/framework/vpc-nat-gw.go | 29 +- test/e2e/iptables-vpc-nat-gw/e2e_test.go | 886 +++++++++++++++++++++-- 7 files changed, 1227 insertions(+), 54 deletions(-) create mode 100644 test/e2e/framework/exec_utils.go create mode 100644 test/e2e/framework/qos-policy.go diff --git a/pkg/controller/qos_policy.go b/pkg/controller/qos_policy.go index 32d79518513..64e01d38565 100644 --- a/pkg/controller/qos_policy.go +++ b/pkg/controller/qos_policy.go @@ -400,7 +400,8 @@ func (c *Controller) handleUpdateQoSPolicy(key string) error { if cachedQos.Spec.BindingType == kubeovnv1.QoSBindingTypeEIP { eips, err := c.iptablesEipsLister.List( labels.SelectorFromSet(labels.Set{util.QoSLabel: key})) - if err != nil { + // when eip is not found, we should delete finalizer + if err != nil && !k8serrors.IsNotFound(err) { klog.Errorf("failed to get eip list, %v", err) return err } @@ -414,7 +415,8 @@ func (c *Controller) handleUpdateQoSPolicy(key string) error { if cachedQos.Spec.BindingType == kubeovnv1.QoSBindingTypeNatGw { gws, err := c.vpcNatGatewayLister.List( labels.SelectorFromSet(labels.Set{util.QoSLabel: key})) - if err != nil { + // when nat gw is not found, we should delete finalizer + if err != nil && !k8serrors.IsNotFound(err) { klog.Errorf("failed to get gw list, %v", err) return err } diff --git a/test/e2e/framework/docker/network.go b/test/e2e/framework/docker/network.go index ab2b1de926b..ccac1a1685a 100644 --- a/test/e2e/framework/docker/network.go +++ b/test/e2e/framework/docker/network.go @@ -139,3 +139,12 @@ func NetworkDisconnect(networkID, containerID string) error { return cli.NetworkDisconnect(context.Background(), networkID, containerID, false) } + +func NetworkRemove(networkID string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + defer cli.Close() + return cli.NetworkRemove(context.Background(), networkID) +} diff --git a/test/e2e/framework/exec_utils.go b/test/e2e/framework/exec_utils.go new file mode 100644 index 00000000000..5dbe2db0d8f --- /dev/null +++ b/test/e2e/framework/exec_utils.go @@ -0,0 +1,34 @@ +package framework + +import ( + "context" + + "github.com/kubeovn/kube-ovn/pkg/util" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + + "github.com/onsi/gomega" +) + +// ExecCommandInContainer executes a command in the specified container. +func ExecCommandInContainer(f *Framework, podName, containerName string, cmd ...string) (string, string, error) { + return util.ExecuteCommandInContainer(f.ClientSet, f.ClientConfig(), f.Namespace.Name, podName, containerName, cmd...) +} + +// ExecShellInContainer executes the specified command on the pod's container. 
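+// A typical call from a test case looks like this (the pod, container, and
+// command names here are illustrative only):
+//
+//	stdout, stderr, err := ExecShellInContainer(f, "my-pod", "my-container", "ip -o link")
+//	framework.ExpectNoError(err, "stderr: %s", stderr)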
+func ExecShellInContainer(f *Framework, podName, containerName string, cmd string) (string, string, error) {
+	return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd)
+}
+
+func execCommandInPod(ctx context.Context, f *Framework, podName string, cmd ...string) (string, string, error) {
+	pod, err := f.PodClient().Get(ctx, podName, metav1.GetOptions{})
+	framework.ExpectNoError(err, "failed to get pod %v", podName)
+	gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
+	return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...)
+}
+
+// ExecShellInPod executes the specified command on the pod.
+func ExecShellInPod(ctx context.Context, f *Framework, podName string, cmd string) (string, string, error) {
+	return execCommandInPod(ctx, f, podName, "/bin/sh", "-c", cmd)
+}
diff --git a/test/e2e/framework/iptables-eip.go b/test/e2e/framework/iptables-eip.go
index 3ec8e1dbbf7..b8a43dc8461 100644
--- a/test/e2e/framework/iptables-eip.go
+++ b/test/e2e/framework/iptables-eip.go
@@ -90,6 +90,17 @@ func (c *IptablesEIPClient) PatchSync(original, modified *apiv1.IptablesEIP, req
 	return c.Get(eip.Name).DeepCopy()
 }
 
+// PatchQoSPolicySync patches the iptables eip's qos policy and waits for the qos to be ready for `timeout`.
+// If the qos doesn't become ready before the timeout, it will fail the test.
+func (c *IptablesEIPClient) PatchQoSPolicySync(eipName string, qosPolicyName string) *apiv1.IptablesEIP {
+	eip := c.Get(eipName)
+	modifiedEIP := eip.DeepCopy()
+	modifiedEIP.Spec.QoSPolicy = qosPolicyName
+	_ = c.Patch(eip, modifiedEIP)
+	ExpectTrue(c.WaitToQoSReady(eipName))
+	return c.Get(eipName).DeepCopy()
+}
+
 // Delete deletes a iptables eip if the iptables eip exists
 func (c *IptablesEIPClient) Delete(name string) {
 	err := c.IptablesEIPInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
@@ -118,6 +129,19 @@ func (c *IptablesEIPClient) WaitToBeReady(name string, timeout time.Duration) bo
 	return false
 }
 
+// WaitToQoSReady returns whether the qos is ready within timeout.
+func (c *IptablesEIPClient) WaitToQoSReady(name string) bool {
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+		eip := c.Get(name)
+		if eip.Status.QoSPolicy == eip.Spec.QoSPolicy {
+			Logf("qos %s is ready ", name)
+			return true
+		}
+		Logf("qos %s is not ready ", name)
+	}
+	return false
+}
+
 // WaitToBeUpdated returns whether the iptables eip is updated within timeout.
 func (c *IptablesEIPClient) WaitToBeUpdated(eip *apiv1.IptablesEIP, timeout time.Duration) bool {
 	Logf("Waiting up to %v for iptables eip %s to be updated", timeout, eip.Name)
@@ -147,7 +171,7 @@ func (c *IptablesEIPClient) WaitToDisappear(name string, interval, timeout time.
return nil } -func MakeIptablesEIP(name, v4ip, v6ip, mac, natGwDp, externalSubnet string) *apiv1.IptablesEIP { +func MakeIptablesEIP(name, v4ip, v6ip, mac, natGwDp, externalSubnet, qosPolicyName string) *apiv1.IptablesEIP { eip := &apiv1.IptablesEIP{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -162,5 +186,6 @@ func MakeIptablesEIP(name, v4ip, v6ip, mac, natGwDp, externalSubnet string) *api if externalSubnet != "" { eip.Spec.ExternalSubnet = externalSubnet } + eip.Spec.QoSPolicy = qosPolicyName return eip } diff --git a/test/e2e/framework/qos-policy.go b/test/e2e/framework/qos-policy.go new file mode 100644 index 00000000000..6414c1378be --- /dev/null +++ b/test/e2e/framework/qos-policy.go @@ -0,0 +1,290 @@ +package framework + +import ( + "context" + "errors" + "fmt" + "math/big" + "reflect" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" + + "github.com/onsi/gomega" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +// QoSPolicyClient is a struct for qosPolicy client. +type QoSPolicyClient struct { + f *Framework + v1.QoSPolicyInterface +} + +func (f *Framework) QoSPolicyClient() *QoSPolicyClient { + return &QoSPolicyClient{ + f: f, + QoSPolicyInterface: f.KubeOVNClientSet.KubeovnV1().QoSPolicies(), + } +} + +func (s *QoSPolicyClient) Get(name string) *apiv1.QoSPolicy { + qosPolicy, err := s.QoSPolicyInterface.Get(context.TODO(), name, metav1.GetOptions{}) + ExpectNoError(err) + return qosPolicy +} + +// Create creates a new qosPolicy according to the framework specifications +func (c *QoSPolicyClient) Create(qosPolicy *apiv1.QoSPolicy) *apiv1.QoSPolicy { + s, err := c.QoSPolicyInterface.Create(context.TODO(), qosPolicy, metav1.CreateOptions{}) + ExpectNoError(err, "Error creating qosPolicy") + return s.DeepCopy() +} + +// CreateSync creates a new qosPolicy according to the framework specifications, and waits for it to be ready. +func (c *QoSPolicyClient) CreateSync(qosPolicy *apiv1.QoSPolicy) *apiv1.QoSPolicy { + s := c.Create(qosPolicy) + ExpectTrue(c.WaitToQoSReady(s.Name)) + // Get the newest qosPolicy after it becomes ready + return c.Get(s.Name).DeepCopy() +} + +// Update updates the qosPolicy +func (c *QoSPolicyClient) Update(qosPolicy *apiv1.QoSPolicy, options metav1.UpdateOptions, timeout time.Duration) *apiv1.QoSPolicy { + var updatedQoSPolicy *apiv1.QoSPolicy + err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { + s, err := c.QoSPolicyInterface.Update(ctx, qosPolicy, options) + if err != nil { + return handleWaitingAPIError(err, false, "update qosPolicy %q", qosPolicy.Name) + } + updatedQoSPolicy = s + return true, nil + }) + if err == nil { + return updatedQoSPolicy.DeepCopy() + } + + if errors.Is(err, context.DeadlineExceeded) { + Failf("timed out while retrying to update qosPolicy %s", qosPolicy.Name) + } + Failf("error occurred while retrying to update qosPolicy %s: %v", qosPolicy.Name, err) + + return nil +} + +// UpdateSync updates the qosPolicy and waits for the qosPolicy to be ready for `timeout`. +// If the qosPolicy doesn't become ready before the timeout, it will fail the test. 
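+// A sketch of a typical use, swapping in a new set of bandwidth limit rules
+// (the policy name and timeout below are illustrative only):
+//
+//	qos := qosPolicyClient.Get("my-qos-policy").DeepCopy()
+//	qos.Spec.BandwidthLimitRules = newRules
+//	qos = qosPolicyClient.UpdateSync(qos, metav1.UpdateOptions{}, 2*time.Minute)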
+func (c *QoSPolicyClient) UpdateSync(qosPolicy *apiv1.QoSPolicy, options metav1.UpdateOptions, timeout time.Duration) *apiv1.QoSPolicy { + s := c.Update(qosPolicy, options, timeout) + ExpectTrue(c.WaitToBeUpdated(s, timeout)) + ExpectTrue(c.WaitToBeReady(s.Name, timeout)) + // Get the newest qosPolicy after it becomes ready + return c.Get(s.Name).DeepCopy() +} + +// Patch patches the qosPolicy +func (c *QoSPolicyClient) Patch(original, modified *apiv1.QoSPolicy) *apiv1.QoSPolicy { + patch, err := util.GenerateMergePatchPayload(original, modified) + ExpectNoError(err) + + var patchedQoSPolicy *apiv1.QoSPolicy + err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { + s, err := c.QoSPolicyInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + return handleWaitingAPIError(err, false, "patch qosPolicy %q", original.Name) + } + patchedQoSPolicy = s + return true, nil + }) + if err == nil { + return patchedQoSPolicy.DeepCopy() + } + + if errors.Is(err, context.DeadlineExceeded) { + Failf("timed out while retrying to patch qosPolicy %s", original.Name) + } + Failf("error occurred while retrying to patch qosPolicy %s: %v", original.Name, err) + + return nil +} + +// PatchSync patches the qosPolicy and waits for the qosPolicy to be ready for `timeout`. +// If the qosPolicy doesn't become ready before the timeout, it will fail the test. +func (c *QoSPolicyClient) PatchSync(original, modified *apiv1.QoSPolicy) *apiv1.QoSPolicy { + s := c.Patch(original, modified) + ExpectTrue(c.WaitToBeUpdated(s, timeout)) + ExpectTrue(c.WaitToBeReady(s.Name, timeout)) + // Get the newest qosPolicy after it becomes ready + return c.Get(s.Name).DeepCopy() +} + +// Delete deletes a qosPolicy if the qosPolicy exists +func (c *QoSPolicyClient) Delete(name string) { + err := c.QoSPolicyInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + Failf("Failed to delete qosPolicy %q: %v", name, err) + } +} + +// DeleteSync deletes the qosPolicy and waits for the qosPolicy to disappear for `timeout`. +// If the qosPolicy doesn't disappear before the timeout, it will fail the test. +func (c *QoSPolicyClient) DeleteSync(name string) { + c.Delete(name) + gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for qosPolicy %q to disappear", name) +} + +func isQoSPolicyConditionSetAsExpected(qosPolicy *apiv1.QoSPolicy, conditionType apiv1.ConditionType, wantTrue, silent bool) bool { + for _, cond := range qosPolicy.Status.Conditions { + if cond.Type == conditionType { + if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) { + return true + } + if !silent { + Logf("Condition %s of qosPolicy %s is %v instead of %t. Reason: %v, message: %v", + conditionType, qosPolicy.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message) + } + return false + } + } + if !silent { + Logf("Couldn't find condition %v on qosPolicy %v", conditionType, qosPolicy.Name) + } + return false +} + +// IsQoSPolicyConditionSetAsExpected returns a wantTrue value if the qosPolicy has a match to the conditionType, +// otherwise returns an opposite value of the wantTrue with detailed logging. 
+func IsQoSPolicyConditionSetAsExpected(qosPolicy *apiv1.QoSPolicy, conditionType apiv1.ConditionType, wantTrue bool) bool { + return isQoSPolicyConditionSetAsExpected(qosPolicy, conditionType, wantTrue, false) +} + +// WaitConditionToBe returns whether qosPolicy "name's" condition state matches wantTrue +// within timeout. If wantTrue is true, it will ensure the qosPolicy condition status is +// ConditionTrue; if it's false, it ensures the qosPolicy condition is in any state other +// than ConditionTrue (e.g. not true or unknown). +func (c *QoSPolicyClient) WaitConditionToBe(name string, conditionType apiv1.ConditionType, wantTrue bool, timeout time.Duration) bool { + Logf("Waiting up to %v for qosPolicy %s condition %s to be %t", timeout, name, conditionType, wantTrue) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + qosPolicy := c.Get(name) + if IsQoSPolicyConditionSetAsExpected(qosPolicy, conditionType, wantTrue) { + Logf("QoSPolicy %s reach desired %t condition status", name, wantTrue) + return true + } + Logf("QoSPolicy %s still not reach desired %t condition status", name, wantTrue) + } + Logf("QoSPolicy %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) + return false +} + +// WaitToBeReady returns whether the qosPolicy is ready within timeout. +func (c *QoSPolicyClient) WaitToBeReady(name string, timeout time.Duration) bool { + return c.WaitConditionToBe(name, apiv1.Ready, true, timeout) +} + +// WaitToBeUpdated returns whether the qosPolicy is updated within timeout. +func (c *QoSPolicyClient) WaitToBeUpdated(qosPolicy *apiv1.QoSPolicy, timeout time.Duration) bool { + Logf("Waiting up to %v for qosPolicy %s to be updated", timeout, qosPolicy.Name) + rv, _ := big.NewInt(0).SetString(qosPolicy.ResourceVersion, 10) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + s := c.Get(qosPolicy.Name) + if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { + Logf("QoSPolicy %s updated", qosPolicy.Name) + return true + } + Logf("QoSPolicy %s still not updated", qosPolicy.Name) + } + Logf("QoSPolicy %s was not updated within %v", qosPolicy.Name, timeout) + return false +} + +// WaitUntil waits the given timeout duration for the specified condition to be met. +func (c *QoSPolicyClient) WaitUntil(name string, cond func(s *apiv1.QoSPolicy) (bool, error), condDesc string, interval, timeout time.Duration) *apiv1.QoSPolicy { + var qosPolicy *apiv1.QoSPolicy + err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(_ context.Context) (bool, error) { + Logf("Waiting for qosPolicy %s to meet condition %q", name, condDesc) + qosPolicy = c.Get(name).DeepCopy() + met, err := cond(qosPolicy) + if err != nil { + return false, fmt.Errorf("failed to check condition for qosPolicy %s: %v", name, err) + } + return met, nil + }) + if err == nil { + return qosPolicy + } + + if errors.Is(err, context.DeadlineExceeded) { + Failf("timed out while waiting for qosPolicy %s to meet condition %q", name, condDesc) + } + Failf("error occurred while waiting for qosPolicy %s to meet condition %q: %v", name, condDesc, err) + + return nil +} + +// WaitToDisappear waits the given timeout duration for the specified qosPolicy to disappear. 
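+// It returns an error instead of failing the test directly, so callers can
+// decide how to react; DeleteSync above, for example, asserts on the result:
+//
+//	gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for qosPolicy %q to disappear", name)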
+func (c *QoSPolicyClient) WaitToDisappear(name string, interval, timeout time.Duration) error {
+	err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.QoSPolicy, error) {
+		qosPolicy, err := c.QoSPolicyInterface.Get(ctx, name, metav1.GetOptions{})
+		if apierrors.IsNotFound(err) {
+			return nil, nil
+		}
+		return qosPolicy, err
+	})).WithTimeout(timeout).Should(gomega.BeNil())
+	if err != nil {
+		return fmt.Errorf("expected qosPolicy %s to not be found: %w", name, err)
+	}
+	return nil
+}
+
+// WaitToQoSReady returns whether the qos is ready within timeout.
+func (c *QoSPolicyClient) WaitToQoSReady(name string) bool {
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+		qos := c.Get(name)
+		if len(qos.Spec.BandwidthLimitRules) != len(qos.Status.BandwidthLimitRules) {
+			Logf("qos %s is not ready ", name)
+			continue
+		}
+		sort.Slice(qos.Spec.BandwidthLimitRules, func(i, j int) bool {
+			return qos.Spec.BandwidthLimitRules[i].Name < qos.Spec.BandwidthLimitRules[j].Name
+		})
+		sort.Slice(qos.Status.BandwidthLimitRules, func(i, j int) bool {
+			return qos.Status.BandwidthLimitRules[i].Name < qos.Status.BandwidthLimitRules[j].Name
+		})
+		equalCount := 0
+		for index, specRule := range qos.Spec.BandwidthLimitRules {
+			statusRule := qos.Status.BandwidthLimitRules[index]
+			if reflect.DeepEqual(specRule, statusRule) {
+				equalCount++
+			}
+		}
+
+		if equalCount == len(qos.Spec.BandwidthLimitRules) {
+			Logf("qos %s is ready ", name)
+			return true
+		}
+		Logf("qos %s is not ready ", name)
+	}
+	return false
+}
+
+func MakeQoSPolicy(name string, shared bool, qosType apiv1.QoSPolicyBindingType, rules apiv1.QoSPolicyBandwidthLimitRules) *apiv1.QoSPolicy {
+	qosPolicy := &apiv1.QoSPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: apiv1.QoSPolicySpec{
+			BandwidthLimitRules: rules,
+			Shared:              shared,
+			BindingType:         qosType,
+		},
+	}
+	return qosPolicy
+}
diff --git a/test/e2e/framework/vpc-nat-gw.go b/test/e2e/framework/vpc-nat-gw.go
index c5ff47c2eb7..404f6c78c9c 100644
--- a/test/e2e/framework/vpc-nat-gw.go
+++ b/test/e2e/framework/vpc-nat-gw.go
@@ -82,7 +82,7 @@ func (c *VpcNatGatewayClient) Patch(original, modified *apiv1.VpcNatGateway) *ap
 
 // PatchSync patches the vpc nat gw and waits for the vpc nat gw to be ready for `timeout`.
 // If the vpc nat gw doesn't become ready before the timeout, it will fail the test.
-func (c *VpcNatGatewayClient) PatchSync(original, modified *apiv1.VpcNatGateway, requiredNodes []string, timeout time.Duration) *apiv1.VpcNatGateway {
+func (c *VpcNatGatewayClient) PatchSync(original, modified *apiv1.VpcNatGateway, timeout time.Duration) *apiv1.VpcNatGateway {
 	vpcNatGw := c.Patch(original, modified)
 	ExpectTrue(c.WaitToBeUpdated(vpcNatGw, timeout))
 	ExpectTrue(c.WaitToBeReady(vpcNatGw.Name, timeout))
@@ -90,6 +90,17 @@ func (c *VpcNatGatewayClient) PatchSync(original, modified *apiv1.VpcNatGateway,
 	return c.Get(vpcNatGw.Name).DeepCopy()
 }
 
+// PatchQoSPolicySync patches the vpc nat gw's qos policy and waits for the qos to be ready for `timeout`.
+// If the qos doesn't become ready before the timeout, it will fail the test.
+func (c *VpcNatGatewayClient) PatchQoSPolicySync(natgwName string, qosPolicyName string) *apiv1.VpcNatGateway { + natgw := c.Get(natgwName) + modifiedNATGW := natgw.DeepCopy() + modifiedNATGW.Spec.QoSPolicy = qosPolicyName + _ = c.Patch(natgw, modifiedNATGW) + ExpectTrue(c.WaitToQoSReady(natgwName)) + return c.Get(natgwName).DeepCopy() +} + // Delete deletes a vpc nat gw if the vpc nat gw exists func (c *VpcNatGatewayClient) Delete(name string) { err := c.VpcNatGatewayInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) @@ -144,7 +155,20 @@ func (c *VpcNatGatewayClient) WaitToDisappear(name string, interval, timeout tim return nil } -func MakeVpcNatGateway(name, vpc, subnet, lanIp, externalSubnet string) *apiv1.VpcNatGateway { +// WaitToQoSReady returns whether the qos is ready within timeout. +func (c *VpcNatGatewayClient) WaitToQoSReady(name string) bool { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + natgw := c.Get(name) + if natgw.Status.QoSPolicy == natgw.Spec.QoSPolicy { + Logf("qos %s is ready ", name) + return true + } + Logf("qos %s is not ready ", name) + } + return false +} + +func MakeVpcNatGateway(name, vpc, subnet, lanIp, externalSubnet, qosPolicyName string) *apiv1.VpcNatGateway { vpcNatGw := &apiv1.VpcNatGateway{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -158,5 +182,6 @@ func MakeVpcNatGateway(name, vpc, subnet, lanIp, externalSubnet string) *apiv1.V if externalSubnet != "" { vpcNatGw.Spec.ExternalSubnets = []string{externalSubnet} } + vpcNatGw.Spec.QoSPolicy = qosPolicyName return vpcNatGw } diff --git a/test/e2e/iptables-vpc-nat-gw/e2e_test.go b/test/e2e/iptables-vpc-nat-gw/e2e_test.go index ccd2fbe5f95..45436016b62 100644 --- a/test/e2e/iptables-vpc-nat-gw/e2e_test.go +++ b/test/e2e/iptables-vpc-nat-gw/e2e_test.go @@ -2,14 +2,18 @@ package ovn_eip import ( "context" + "errors" "flag" "fmt" "os" "path/filepath" + "strconv" "strings" "testing" + "time" dockertypes "github.com/docker/docker/api/types" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" @@ -34,6 +38,34 @@ const vpcNatGWConfigMapName = "ovn-vpc-nat-gw-config" const networkAttachDefName = "ovn-vpc-external-network" const externalSubnetProvider = "ovn-vpc-external-network.kube-system" +const iperf2Port = "20288" +const skipIperf = true + +const ( + eipLimit = iota*5 + 10 + updatedEIPLimit + newEIPLimit + specificIPLimit + defaultNicLimit +) + +type qosParams struct { + vpc1Name string + vpc2Name string + vpc1SubnetName string + vpc2SubnetName string + vpcNat1GwName string + vpcNat2GwName string + vpc1EIPName string + vpc2EIPName string + vpc1FIPName string + vpc2FIPName string + vpc1PodName string + vpc2PodName string + attachDefName string + subnetProvider string +} + func setupVpcNatGwTestEnvironment( f *framework.Framework, dockerExtNetNetwork *dockertypes.NetworkResource, @@ -51,52 +83,60 @@ func setupVpcNatGwTestEnvironment( externalNetworkName string, nicName string, provider string, + skipNADSetup bool, ) { ginkgo.By("Getting docker network " + dockerExtNetName) network, err := docker.NetworkInspect(dockerExtNetName) framework.ExpectNoError(err, "getting docker network "+dockerExtNetName) - ginkgo.By("Getting k8s nodes") - _, err = e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet) - framework.ExpectNoError(err) - - ginkgo.By("Getting network attachment definition " + externalNetworkName) - attachConf := fmt.Sprintf(`{"cniVersion": "0.3.0","type": 
"macvlan","master": "%s","mode": "bridge"}`, nicName) - attachNet := framework.MakeNetworkAttachmentDefinition(externalNetworkName, framework.KubeOvnNamespace, attachConf) - attachNetClient.Create(attachNet) - - nad := attachNetClient.Get(externalNetworkName) - framework.ExpectNoError(err, "failed to get") - ginkgo.By("Got network attachment definition " + nad.Name) - - ginkgo.By("Creating underlay macvlan subnet " + externalNetworkName) - cidr := make([]string, 0, 2) - gateway := make([]string, 0, 2) - for _, config := range dockerExtNetNetwork.IPAM.Config { - switch util.CheckProtocol(config.Subnet) { - case apiv1.ProtocolIPv4: - if f.ClusterIpFamily != "ipv6" { - cidr = append(cidr, config.Subnet) - gateway = append(gateway, config.Gateway) + if !skipNADSetup { + ginkgo.By("Getting network attachment definition " + externalNetworkName) + attachConf := fmt.Sprintf(`{ + "cniVersion": "0.3.0", + "type": "macvlan", + "master": "%s", + "mode": "bridge", + "ipam": { + "type": "kube-ovn", + "server_socket": "/run/openvswitch/kube-ovn-daemon.sock", + "provider": "%s" } - case apiv1.ProtocolIPv6: - if f.ClusterIpFamily != "ipv4" { - cidr = append(cidr, config.Subnet) - gateway = append(gateway, config.Gateway) + }`, nicName, provider) + attachNet := framework.MakeNetworkAttachmentDefinition(externalNetworkName, framework.KubeOvnNamespace, attachConf) + attachNetClient.Create(attachNet) + nad := attachNetClient.Get(externalNetworkName) + + ginkgo.By("Got network attachment definition " + nad.Name) + + ginkgo.By("Creating underlay macvlan subnet " + externalNetworkName) + cidr := make([]string, 0, 2) + gateway := make([]string, 0, 2) + for _, config := range dockerExtNetNetwork.IPAM.Config { + switch util.CheckProtocol(config.Subnet) { + case apiv1.ProtocolIPv4: + if f.ClusterIpFamily != "ipv6" { + cidr = append(cidr, config.Subnet) + gateway = append(gateway, config.Gateway) + } + case apiv1.ProtocolIPv6: + if f.ClusterIpFamily != "ipv4" { + cidr = append(cidr, config.Subnet) + gateway = append(gateway, config.Gateway) + } } } - } - excludeIPs := make([]string, 0, len(network.Containers)*2) - for _, container := range network.Containers { - if container.IPv4Address != "" && f.ClusterIpFamily != "ipv6" { - excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - if container.IPv6Address != "" && f.ClusterIpFamily != "ipv4" { - excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) + excludeIPs := make([]string, 0, len(network.Containers)*2) + for _, container := range network.Containers { + if container.IPv4Address != "" && f.ClusterIpFamily != "ipv6" { + excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) + } + if container.IPv6Address != "" && f.ClusterIpFamily != "ipv4" { + excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) + } } + macvlanSubnet := framework.MakeSubnet(externalNetworkName, "", strings.Join(cidr, ","), strings.Join(gateway, ","), "", provider, excludeIPs, nil, nil) + _ = subnetClient.CreateSync(macvlanSubnet) } - macvlanSubnet := framework.MakeSubnet(externalNetworkName, "", strings.Join(cidr, ","), strings.Join(gateway, ","), "", provider, excludeIPs, nil, nil) - _ = subnetClient.CreateSync(macvlanSubnet) ginkgo.By("Getting config map " + vpcNatGWConfigMapName) _, err = f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.Background(), vpcNatGWConfigMapName, metav1.GetOptions{}) @@ -111,7 +151,7 @@ func setupVpcNatGwTestEnvironment( _ = 
subnetClient.CreateSync(overlaySubnet) ginkgo.By("Creating custom vpc nat gw") - vpcNatGw := framework.MakeVpcNatGateway(vpcNatGwName, vpcName, overlaySubnetName, lanIp, externalNetworkName) + vpcNatGw := framework.MakeVpcNatGateway(vpcNatGwName, vpcName, overlaySubnetName, lanIp, externalNetworkName, "") _ = vpcNatGwClient.CreateSync(vpcNatGw) } @@ -311,9 +351,9 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { }) framework.ConformanceIt("iptables eip fip snat dnat", func() { - overlaySubnetV4Cidr := "192.168.0.0/24" - overlaySubnetV4Gw := "192.168.0.1" - lanIp := "192.168.0.254" + overlaySubnetV4Cidr := "10.0.0.0/24" + overlaySubnetV4Gw := "10.0.0.1" + lanIp := "10.0.0.254" setupVpcNatGwTestEnvironment( f, dockerExtNet1Network, attachNetClient, subnetClient, vpcClient, vpcNatGwClient, @@ -321,6 +361,7 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIp, dockerExtNet1Name, networkAttachDefName, net1NicName, externalSubnetProvider, + false, ) ginkgo.By("Creating iptables vip for fip") @@ -328,14 +369,14 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { _ = vipClient.CreateSync(fipVip) fipVip = vipClient.Get(fipVipName) ginkgo.By("Creating iptables eip for fip") - fipEip := framework.MakeIptablesEIP(fipEipName, "", "", "", vpcNatGwName, "") + fipEip := framework.MakeIptablesEIP(fipEipName, "", "", "", vpcNatGwName, "", "") _ = iptablesEIPClient.CreateSync(fipEip) ginkgo.By("Creating iptables fip") fip := framework.MakeIptablesFIPRule(fipName, fipEipName, fipVip.Status.V4ip) _ = iptablesFIPClient.CreateSync(fip) ginkgo.By("Creating iptables eip for snat") - snatEip := framework.MakeIptablesEIP(snatEipName, "", "", "", vpcNatGwName, "") + snatEip := framework.MakeIptablesEIP(snatEipName, "", "", "", vpcNatGwName, "", "") _ = iptablesEIPClient.CreateSync(snatEip) ginkgo.By("Creating iptables snat") snat := framework.MakeIptablesSnatRule(snatName, snatEipName, overlaySubnetV4Cidr) @@ -346,7 +387,7 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { _ = vipClient.CreateSync(dnatVip) dnatVip = vipClient.Get(dnatVipName) ginkgo.By("Creating iptables eip for dnat") - dnatEip := framework.MakeIptablesEIP(dnatEipName, "", "", "", vpcNatGwName, "") + dnatEip := framework.MakeIptablesEIP(dnatEipName, "", "", "", vpcNatGwName, "", "") _ = iptablesEIPClient.CreateSync(dnatEip) ginkgo.By("Creating iptables dnat") dnat := framework.MakeIptablesDnatRule(dnatName, dnatEipName, "80", "tcp", dnatVip.Status.V4ip, "8080") @@ -358,7 +399,7 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { _ = vipClient.CreateSync(shareVip) fipVip = vipClient.Get(fipVipName) ginkgo.By("Creating share iptables eip") - shareEip := framework.MakeIptablesEIP(sharedEipName, "", "", "", vpcNatGwName, "") + shareEip := framework.MakeIptablesEIP(sharedEipName, "", "", "", vpcNatGwName, "", "") _ = iptablesEIPClient.CreateSync(shareEip) ginkgo.By("Creating the first iptables fip with share eip vip should be ok") shareFipShouldOk := framework.MakeIptablesFIPRule(sharedEipFipShoudOkName, sharedEipName, fipVip.Status.V4ip) @@ -454,19 +495,21 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { subnetClient.DeleteSync(overlaySubnetName) // multiple external network case - net2OverlaySubnetV4Cidr := "192.168.1.0/24" - net2OoverlaySubnetV4Gw := "192.168.1.1" - net2LanIp := "192.168.1.254" + net2OverlaySubnetV4Cidr := "10.0.1.0/24" + net2OoverlaySubnetV4Gw := "10.0.1.1" + net2LanIp := 
"10.0.1.254" setupVpcNatGwTestEnvironment( f, dockerExtNet2Network, attachNetClient, subnetClient, vpcClient, vpcNatGwClient, net2VpcName, net2OverlaySubnetName, net2VpcNatGwName, net2OverlaySubnetV4Cidr, net2OoverlaySubnetV4Gw, net2LanIp, dockerExtNet2Name, net2AttachDefName, net2NicName, - net2SubnetProvider) + net2SubnetProvider, + false, + ) ginkgo.By("Creating iptables eip of net2") - net2Eip := framework.MakeIptablesEIP(net2EipName, "", "", "", net2VpcNatGwName, net2AttachDefName) + net2Eip := framework.MakeIptablesEIP(net2EipName, "", "", "", net2VpcNatGwName, net2AttachDefName, "") _ = iptablesEIPClient.CreateSync(net2Eip) ginkgo.By("Deleting iptables eip " + net2EipName) @@ -499,6 +542,751 @@ var _ = framework.Describe("[group:iptables-vpc-nat-gw]", func() { }) }) +func iperf(f *framework.Framework, iperfClientPod *corev1.Pod, iperfServerEIP *apiv1.IptablesEIP) string { + for i := 0; i < 3; i++ { + command := fmt.Sprintf("iperf -e -p %s --reportstyle C -i 1 -c %s -t 10", iperf2Port, iperfServerEIP.Status.IP) + stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, iperfClientPod.Name, command) + framework.Logf("output from exec on client pod %s (eip %s)\n", iperfClientPod.Name, iperfServerEIP.Name) + if stdOutput != "" && err == nil { + framework.Logf("output:\n%s", stdOutput) + return stdOutput + } + framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s, retrying", command, err, errOutput, stdOutput) + time.Sleep(3 * time.Second) + } + framework.ExpectNoError(errors.New("iperf failed")) + return "" +} + +func checkQos(f *framework.Framework, + vpc1Pod *corev1.Pod, vpc2Pod *corev1.Pod, vpc1EIP *apiv1.IptablesEIP, vpc2EIP *apiv1.IptablesEIP, + limit int, expect bool) { + if !skipIperf { + if expect { + output := iperf(f, vpc1Pod, vpc2EIP) + framework.ExpectTrue(vaildRateLimit(output, limit)) + output = iperf(f, vpc2Pod, vpc1EIP) + framework.ExpectTrue(vaildRateLimit(output, limit)) + } else { + output := iperf(f, vpc1Pod, vpc2EIP) + framework.ExpectFalse(vaildRateLimit(output, limit)) + output = iperf(f, vpc2Pod, vpc1EIP) + framework.ExpectFalse(vaildRateLimit(output, limit)) + } + } +} + +func newVPCqosParamsInit() *qosParams { + qosParames := &qosParams{ + vpc1Name: "qos-vpc1-" + framework.RandomSuffix(), + vpc2Name: "qos-vpc2-" + framework.RandomSuffix(), + vpc1SubnetName: "qos-vpc1-subnet-" + framework.RandomSuffix(), + vpc2SubnetName: "qos-vpc2-subnet-" + framework.RandomSuffix(), + vpcNat1GwName: "qos-vpc1-gw-" + framework.RandomSuffix(), + vpcNat2GwName: "qos-vpc2-gw-" + framework.RandomSuffix(), + vpc1EIPName: "qos-vpc1-eip-" + framework.RandomSuffix(), + vpc2EIPName: "qos-vpc2-eip-" + framework.RandomSuffix(), + vpc1FIPName: "qos-vpc1-fip-" + framework.RandomSuffix(), + vpc2FIPName: "qos-vpc2-fip-" + framework.RandomSuffix(), + vpc1PodName: "qos-vpc1-pod-" + framework.RandomSuffix(), + vpc2PodName: "qos-vpc2-pod-" + framework.RandomSuffix(), + attachDefName: "qos-ovn-vpc-external-network-" + framework.RandomSuffix(), + } + qosParames.subnetProvider = qosParames.attachDefName + ".kube-system" + return qosParames +} + +func getNicDefaultQoSPolicy(limit int) apiv1.QoSPolicyBandwidthLimitRules { + return apiv1.QoSPolicyBandwidthLimitRules{ + &apiv1.QoSPolicyBandwidthLimitRule{ + Name: "net1-ingress", + Interface: "net1", + RateMax: fmt.Sprint(limit), + BurstMax: fmt.Sprint(limit), + Priority: 3, + Direction: apiv1.DirectionIngress, + }, + &apiv1.QoSPolicyBandwidthLimitRule{ + Name: "net1-egress", + Interface: "net1", + RateMax: 
+
+func getEIPQoSRule(limit int) apiv1.QoSPolicyBandwidthLimitRules {
+	return apiv1.QoSPolicyBandwidthLimitRules{
+		&apiv1.QoSPolicyBandwidthLimitRule{
+			Name:      "eip-ingress",
+			RateMax:   fmt.Sprint(limit),
+			BurstMax:  fmt.Sprint(limit),
+			Priority:  1,
+			Direction: apiv1.DirectionIngress,
+		},
+		&apiv1.QoSPolicyBandwidthLimitRule{
+			Name:      "eip-egress",
+			RateMax:   fmt.Sprint(limit),
+			BurstMax:  fmt.Sprint(limit),
+			Priority:  1,
+			Direction: apiv1.DirectionEgress,
+		},
+	}
+}
+
+func getSpecialQoSRule(limit int, ip string) apiv1.QoSPolicyBandwidthLimitRules {
+	return apiv1.QoSPolicyBandwidthLimitRules{
+		&apiv1.QoSPolicyBandwidthLimitRule{
+			Name:       "net1-extip-ingress",
+			Interface:  "net1",
+			RateMax:    fmt.Sprint(limit),
+			BurstMax:   fmt.Sprint(limit),
+			Priority:   2,
+			Direction:  apiv1.DirectionIngress,
+			MatchType:  apiv1.MatchTypeIP,
+			MatchValue: "src " + ip + "/32",
+		},
+		&apiv1.QoSPolicyBandwidthLimitRule{
+			Name:       "net1-extip-egress",
+			Interface:  "net1",
+			RateMax:    fmt.Sprint(limit),
+			BurstMax:   fmt.Sprint(limit),
+			Priority:   2,
+			Direction:  apiv1.DirectionEgress,
+			MatchType:  apiv1.MatchTypeIP,
+			MatchValue: "dst " + ip + "/32",
+		},
+	}
+}
+
+// defaultQoSCases tests the default qos policy of the natgw nic
+func defaultQoSCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	podClient *framework.PodClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+) {
+	// create nic qos policy
+	qosPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + qosPolicyName)
+	rules := getNicDefaultQoSPolicy(defaultNicLimit)
+
+	qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
+	_ = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + fmt.Sprint(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Delete natgw pod " + natgwName + "-0")
+	natGwPodName := "vpc-nat-gw-" + natgwName + "-0"
+	podClient.DeleteSync(natGwPodName)
+
+	ginkgo.By("Wait for natgw " + natgwName + " qos rebuild")
+	time.Sleep(5 * time.Second)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + fmt.Sprint(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
+
+	ginkgo.By("Deleting qos policy " + qosPolicyName)
+	qosPolicyClient.DeleteSync(qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + fmt.Sprint(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
+}
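+
+// defaultQoSCases above and eipQoSCases below also delete the natgw pod
+// ("vpc-nat-gw-<name>-0") mid-test to verify that the qos rules are rebuilt
+// when the gateway pod is recreated.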
+
+// eipQoSCases tests the qos policy bound to an eip
+func eipQoSCases(f *framework.Framework,
+	eipClient *framework.IptablesEIPClient,
+	podClient *framework.PodClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	eipName string,
+	natgwName string,
+) {
+	// create eip qos policy
+	qosPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + qosPolicyName)
+	rules := getEIPQoSRule(eipLimit)
+
+	qosPolicy := framework.MakeQoSPolicy(qosPolicyName, false, apiv1.QoSBindingTypeEIP, rules)
+	qosPolicy = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Patch eip " + eipName + " with qos policy " + qosPolicyName)
+	_ = eipClient.PatchQoSPolicySync(eipName, qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + fmt.Sprint(eipLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
+
+	ginkgo.By("Update qos policy " + qosPolicyName + " with new rate limit")
+
+	rules = getEIPQoSRule(updatedEIPLimit)
+	modifiedqosPolicy := qosPolicy.DeepCopy()
+	modifiedqosPolicy.Spec.BandwidthLimitRules = rules
+	qosPolicyClient.Patch(qosPolicy, modifiedqosPolicy)
+	framework.ExpectTrue(qosPolicyClient.WaitToQoSReady(qosPolicyName))
+
+	ginkgo.By("Check qos " + qosPolicyName + " is changed to " + fmt.Sprint(updatedEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true)
+
+	ginkgo.By("Delete natgw pod " + natgwName + "-0")
+	natGwPodName := "vpc-nat-gw-" + natgwName + "-0"
+	podClient.DeleteSync(natGwPodName)
+
+	ginkgo.By("Wait for natgw " + natgwName + " qos rebuild")
+	time.Sleep(5 * time.Second)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + fmt.Sprint(updatedEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true)
+
+	newQoSPolicyName := "new-eip-qos-policy-" + framework.RandomSuffix()
+	newRules := getEIPQoSRule(newEIPLimit)
+	newQoSPolicy := framework.MakeQoSPolicy(newQoSPolicyName, false, apiv1.QoSBindingTypeEIP, newRules)
+	_ = qosPolicyClient.CreateSync(newQoSPolicy)
+
+	ginkgo.By("Change qos policy of eip " + eipName + " to " + newQoSPolicyName)
+	_ = eipClient.PatchQoSPolicySync(eipName, newQoSPolicyName)
+
+	ginkgo.By("Check qos " + newQoSPolicyName + " is limited to " + fmt.Sprint(newEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, true)
+
+	ginkgo.By("Remove qos policy " + newQoSPolicyName + " from eip " + eipName)
+	_ = eipClient.PatchQoSPolicySync(eipName, "")
+
+	ginkgo.By("Deleting qos policy " + qosPolicyName)
+	qosPolicyClient.DeleteSync(qosPolicyName)
+
+	ginkgo.By("Deleting qos policy " + newQoSPolicyName)
+	qosPolicyClient.DeleteSync(newQoSPolicyName)
+
+	ginkgo.By("Check qos " + newQoSPolicyName + " is not limited to " + fmt.Sprint(newEIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, false)
+}
+
+// specifyingIPQoSCases tests the qos policy that matches a specific ip
+func specifyingIPQoSCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc1EIP *apiv1.IptablesEIP,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+) {
+	// create nic qos policy
+	qosPolicyName := "specifying-ip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + qosPolicyName)
+
+	rules := getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)
+
+	qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
+	_ = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName)
+
+	ginkgo.By("Check qos " + qosPolicyName + " is limited to " + fmt.Sprint(specificIPLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true)
+
ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName) + _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") + + ginkgo.By("Deleting qos policy " + qosPolicyName) + qosPolicyClient.DeleteSync(qosPolicyName) + + ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + fmt.Sprint(specificIPLimit) + "Mbps") + checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, false) +} + +// priorityQoSCases test qos match priority +func priorityQoSCases(f *framework.Framework, + vpcNatGwClient *framework.VpcNatGatewayClient, + eipClient *framework.IptablesEIPClient, + qosPolicyClient *framework.QoSPolicyClient, + vpc1Pod *corev1.Pod, + vpc2Pod *corev1.Pod, + vpc1EIP *apiv1.IptablesEIP, + vpc2EIP *apiv1.IptablesEIP, + natgwName string, + eipName string, +) { + // create nic qos policy + natGwQoSPolicyName := "priority-nic-qos-policy-" + framework.RandomSuffix() + ginkgo.By("Creating qos policy " + natGwQoSPolicyName) + // default qos policy + special qos policy + natgwRules := getNicDefaultQoSPolicy(defaultNicLimit) + natgwRules = append(natgwRules, getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)...) + + natgwQoSPolicy := framework.MakeQoSPolicy(natGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, natgwRules) + _ = qosPolicyClient.CreateSync(natgwQoSPolicy) + + ginkgo.By("Patch natgw " + natgwName + " with qos policy " + natGwQoSPolicyName) + _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, natGwQoSPolicyName) + + eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix() + ginkgo.By("Creating qos policy " + eipQoSPolicyName) + eipRules := getEIPQoSRule(eipLimit) + + eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, eipRules) + _ = qosPolicyClient.CreateSync(eipQoSPolicy) + + ginkgo.By("Patch eip " + eipName + " with qos policy " + eipQoSPolicyName) + _ = eipClient.PatchQoSPolicySync(eipName, eipQoSPolicyName) + + // match qos of priority 1 + ginkgo.By("Check qos to match priority 1 is limited to " + fmt.Sprint(eipLimit) + "Mbps") + checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true) + + ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from natgw " + eipName) + _ = eipClient.PatchQoSPolicySync(eipName, "") + + ginkgo.By("Deleting qos policy " + eipQoSPolicyName) + qosPolicyClient.DeleteSync(eipQoSPolicyName) + + // match qos of priority 2 + ginkgo.By("Check qos to match priority 2 is limited to " + fmt.Sprint(specificIPLimit) + "Mbps") + checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true) + + // change qos policy of natgw + newNatGwQoSPolicyName := "new-priority-nic-qos-policy-" + framework.RandomSuffix() + ginkgo.By("Creating qos policy " + newNatGwQoSPolicyName) + newNatgwRules := getNicDefaultQoSPolicy(defaultNicLimit) + + newNatgwQoSPolicy := framework.MakeQoSPolicy(newNatGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, newNatgwRules) + _ = qosPolicyClient.CreateSync(newNatgwQoSPolicy) + + ginkgo.By("Change qos policy of natgw " + natgwName + " to " + newNatGwQoSPolicyName) + _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, newNatGwQoSPolicyName) + + // match qos of priority 3 + ginkgo.By("Check qos to match priority 3 is limited to " + fmt.Sprint(specificIPLimit) + "Mbps") + checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) + + ginkgo.By("Remove qos policy " + natGwQoSPolicyName + " from natgw " + natgwName) + _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") + + ginkgo.By("Deleting qos policy " + natGwQoSPolicyName) + 
+	qosPolicyClient.DeleteSync(natGwQoSPolicyName)
+
+	ginkgo.By("Deleting qos policy " + newNatGwQoSPolicyName)
+	qosPolicyClient.DeleteSync(newNatGwQoSPolicyName)
+
+	ginkgo.By("Check qos " + natGwQoSPolicyName + " is not limited to " + fmt.Sprint(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
+}
+
+func createNatGwAndSetQosCases(f *framework.Framework,
+	vpcNatGwClient *framework.VpcNatGatewayClient,
+	ipClient *framework.IpClient,
+	eipClient *framework.IptablesEIPClient,
+	fipClient *framework.IptablesFIPClient,
+	subnetClient *framework.SubnetClient,
+	qosPolicyClient *framework.QoSPolicyClient,
+	vpc1Pod *corev1.Pod,
+	vpc2Pod *corev1.Pod,
+	vpc2EIP *apiv1.IptablesEIP,
+	natgwName string,
+	eipName string,
+	fipName string,
+	vpcName string,
+	overlaySubnetName string,
+	lanIp string,
+	attachDefName string,
+) {
+	// delete fip
+	ginkgo.By("Deleting fip " + fipName)
+	fipClient.DeleteSync(fipName)
+
+	ginkgo.By("Deleting eip " + eipName)
+	eipClient.DeleteSync(eipName)
+
+	// the only pod for vpc nat gateway
+	vpcNatGw1PodName := "vpc-nat-gw-" + natgwName + "-0"
+
+	// delete vpc nat gw statefulset remaining ip for eth0 and net1
+	ginkgo.By("Deleting custom vpc nat gw " + natgwName)
+	vpcNatGwClient.DeleteSync(natgwName)
+
+	overlaySubnet1 := subnetClient.Get(overlaySubnetName)
+	macvlanSubnet := subnetClient.Get(attachDefName)
+	eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider)
+	net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
+	ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
+	ipClient.DeleteSync(eth0IpName)
+	ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
+	ipClient.DeleteSync(net1IpName)
+
+	natgwQoSPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + natgwQoSPolicyName)
+	rules := getNicDefaultQoSPolicy(defaultNicLimit)
+
+	qosPolicy := framework.MakeQoSPolicy(natgwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
+	_ = qosPolicyClient.CreateSync(qosPolicy)
+
+	ginkgo.By("Creating custom vpc nat gw")
+	vpcNatGw := framework.MakeVpcNatGateway(natgwName, vpcName, overlaySubnetName, lanIp, attachDefName, natgwQoSPolicyName)
+	_ = vpcNatGwClient.CreateSync(vpcNatGw)
+
+	eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
+	ginkgo.By("Creating qos policy " + eipQoSPolicyName)
+	rules = getEIPQoSRule(eipLimit)
+
+	eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, rules)
+	_ = qosPolicyClient.CreateSync(eipQoSPolicy)
+
+	ginkgo.By("Creating eip " + eipName)
+	vpc1EIP := framework.MakeIptablesEIP(eipName, "", "", "", natgwName, attachDefName, eipQoSPolicyName)
+	vpc1EIP = eipClient.CreateSync(vpc1EIP)
+
+	ginkgo.By("Creating fip " + fipName)
+	fip := framework.MakeIptablesFIPRule(fipName, eipName, vpc1Pod.Status.PodIP)
+	_ = fipClient.CreateSync(fip)
+
+	ginkgo.By("Check qos " + eipQoSPolicyName + " is limited to " + fmt.Sprint(eipLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
+
+	ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from eip " + eipName)
+	_ = eipClient.PatchQoSPolicySync(eipName, "")
+
+	ginkgo.By("Check qos " + natgwQoSPolicyName + " is limited to " + fmt.Sprint(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
+
+	ginkgo.By("Remove qos policy " + natgwQoSPolicyName + " from natgw " + natgwName)
+	_ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
+
+	ginkgo.By("Check qos " + natgwQoSPolicyName + " is not limited to " + fmt.Sprint(defaultNicLimit) + "Mbps")
+	checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
+
+	ginkgo.By("Deleting qos policy " + natgwQoSPolicyName)
+	qosPolicyClient.DeleteSync(natgwQoSPolicyName)
+
+	ginkgo.By("Deleting qos policy " + eipQoSPolicyName)
+	qosPolicyClient.DeleteSync(eipQoSPolicyName)
+}
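+
+// validRateLimit scans iperf2 CSV output (--reportstyle C), whose last
+// comma-separated field is the measured bandwidth in bits per second, and
+// reports whether any sample falls within ±20% of the expected limit in Mbps.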
+func validRateLimit(text string, limit int) bool {
+	lines := strings.Split(text, "\n")
+	for _, line := range lines {
+		if line == "" {
+			continue
+		}
+		fields := strings.Split(line, ",")
+		lastField := fields[len(fields)-1]
+		number, err := strconv.Atoi(lastField)
+		if err != nil {
+			continue
+		}
+		max := float64(limit) * 1024 * 1024 * 1.2
+		min := float64(limit) * 1024 * 1024 * 0.8
+		if min <= float64(number) && float64(number) <= max {
+			return true
+		}
+	}
+	return false
+}
+
+var _ = framework.Describe("[group:qos-policy]", func() {
+	f := framework.NewDefaultFramework("vpc-qos")
+
+	var skip bool
+	var cs clientset.Interface
+	var attachNetClient *framework.NetworkAttachmentDefinitionClient
+	var clusterName string
+	var vpcClient *framework.VpcClient
+	var vpcNatGwClient *framework.VpcNatGatewayClient
+	var subnetClient *framework.SubnetClient
+	var podClient *framework.PodClient
+	var ipClient *framework.IpClient
+	var iptablesEIPClient *framework.IptablesEIPClient
+	var iptablesFIPClient *framework.IptablesFIPClient
+	var qosPolicyClient *framework.QoSPolicyClient
+
+	var containerID string
+	var image string
+	var net1NicName string
+	var dockerExtNetName string
+
+	vpcqosParams := newVPCqosParamsInit()
+
+	// docker network
+	var dockerExtNetNetwork *dockertypes.NetworkResource
+
+	vpcqosParams.vpc1SubnetName = "qos-vpc1-subnet-" + framework.RandomSuffix()
+	vpcqosParams.vpc2SubnetName = "qos-vpc2-subnet-" + framework.RandomSuffix()
+
+	vpcqosParams.vpcNat1GwName = "qos-gw1-" + framework.RandomSuffix()
+	vpcqosParams.vpcNat2GwName = "qos-gw2-" + framework.RandomSuffix()
+
+	vpcqosParams.vpc1EIPName = "qos-vpc1-eip-" + framework.RandomSuffix()
+	vpcqosParams.vpc2EIPName = "qos-vpc2-eip-" + framework.RandomSuffix()
+
+	vpcqosParams.vpc1FIPName = "qos-vpc1-fip-" + framework.RandomSuffix()
+	vpcqosParams.vpc2FIPName = "qos-vpc2-fip-" + framework.RandomSuffix()
+
+	vpcqosParams.vpc1PodName = "qos-vpc1-pod-" + framework.RandomSuffix()
+	vpcqosParams.vpc2PodName = "qos-vpc2-pod-" + framework.RandomSuffix()
+
+	vpcqosParams.attachDefName = "qos-ovn-vpc-external-network-" + framework.RandomSuffix()
+	vpcqosParams.subnetProvider = vpcqosParams.attachDefName + ".kube-system"
+
+	dockerExtNetName = "kube-ovn-qos"
+
+	ginkgo.BeforeEach(func() {
+		containerID = ""
+		cs = f.ClientSet
+		podClient = f.PodClient()
+		attachNetClient = f.NetworkAttachmentDefinitionClient(framework.KubeOvnNamespace)
+		subnetClient = f.SubnetClient()
+		vpcClient = f.VpcClient()
+		vpcNatGwClient = f.VpcNatGatewayClient()
+		iptablesEIPClient = f.IptablesEIPClient()
+		ipClient = f.IpClient()
+		iptablesFIPClient = f.IptablesFIPClient()
+		qosPolicyClient = f.QoSPolicyClient()
+		if image == "" {
+			image = framework.GetKubeOvnImage(cs)
+		}
+
+		if skip {
+			ginkgo.Skip("vpc qos spec only runs on kind clusters")
+		}
+
+		if clusterName == "" {
+			ginkgo.By("Getting k8s nodes")
+			k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs)
+			framework.ExpectNoError(err)
+
+			cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID)
+			if !ok {
+				skip = true
+				ginkgo.Skip("vpc qos spec only runs on kind clusters")
+			}
+			clusterName = cluster
+		}
+
+		if dockerExtNetNetwork == nil {
+			ginkgo.By("Ensuring docker network " + dockerExtNetName + " exists")
+			network, err := docker.NetworkCreate(dockerExtNetName, true, true)
+			framework.ExpectNoError(err, "creating docker network "+dockerExtNetName)
+			dockerExtNetNetwork = network
+		}
+
+		ginkgo.By("Getting kind nodes")
+		nodes, err := kind.ListNodes(clusterName, "")
+		framework.ExpectNoError(err, "getting nodes in kind cluster")
+		framework.ExpectNotEmpty(nodes)
+
+		ginkgo.By("Connecting nodes to the docker network")
+		err = kind.NetworkConnect(dockerExtNetNetwork.ID, nodes)
+		framework.ExpectNoError(err, "connecting nodes to network "+dockerExtNetName)
+
+		ginkgo.By("Getting node links that belong to the docker network")
+		nodes, err = kind.ListNodes(clusterName, "")
+		framework.ExpectNoError(err, "getting nodes in kind cluster")
+
+		ginkgo.By("Validating node links")
+		network1, err := docker.NetworkInspect(dockerExtNetName)
+		framework.ExpectNoError(err)
+		var eth0Exist, net1Exist bool
+		for _, node := range nodes {
+			links, err := node.ListLinks()
+			framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err)
+			net1Mac := network1.Containers[node.ID].MacAddress
+			for _, link := range links {
+				ginkgo.By("exist node nic " + link.IfName)
+				if link.IfName == "eth0" {
+					eth0Exist = true
+				}
+				if link.Address == net1Mac {
+					net1NicName = link.IfName
+					net1Exist = true
+				}
+			}
+			framework.ExpectTrue(eth0Exist)
+			framework.ExpectTrue(net1Exist)
+		}
+	})
+
+	ginkgo.AfterEach(func() {
+		if containerID != "" {
+			ginkgo.By("Deleting container " + containerID)
+			err := docker.ContainerRemove(containerID)
+			framework.ExpectNoError(err)
+		}
+
+		ginkgo.By("Deleting macvlan underlay subnet " + vpcqosParams.attachDefName)
+		subnetClient.DeleteSync(vpcqosParams.attachDefName)
+
+		// delete net1 attachment definition
+		ginkgo.By("Deleting nad " + vpcqosParams.attachDefName)
+		attachNetClient.Delete(vpcqosParams.attachDefName)
+
+		ginkgo.By("Getting nodes")
+		nodes, err := kind.ListNodes(clusterName, "")
+		framework.ExpectNoError(err, "getting nodes in cluster")
+
+		if dockerExtNetNetwork != nil {
+			ginkgo.By("Disconnecting nodes from the docker network")
+			err = kind.NetworkDisconnect(dockerExtNetNetwork.ID, nodes)
+			framework.ExpectNoError(err, "disconnecting nodes from network "+dockerExtNetName)
+			ginkgo.By("Deleting docker network " + dockerExtNetName)
+			err := docker.NetworkRemove(dockerExtNetNetwork.ID)
+			framework.ExpectNoError(err, "deleting docker network "+dockerExtNetName)
+		}
+	})
+
+	iperfServerCmd := []string{"iperf", "-s", "-i", "1", "-p", iperf2Port}
+
+	framework.ConformanceIt("vpc qos", func() {
+		overlaySubnetV4Cidr := "10.0.0.0/24"
+		overlaySubnetV4Gw := "10.0.0.1"
+		lanIp := "10.0.0.254"
+		setupVpcNatGwTestEnvironment(
+			f, dockerExtNetNetwork, attachNetClient,
+			subnetClient, vpcClient, vpcNatGwClient,
+			vpcqosParams.vpc1Name, vpcqosParams.vpc1SubnetName, vpcqosParams.vpcNat1GwName,
+			overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIp,
+			dockerExtNetName, vpcqosParams.attachDefName, net1NicName,
+			vpcqosParams.subnetProvider,
+			false,
+		)
+		annotations1 := map[string]string{
+			util.LogicalSwitchAnnotation: vpcqosParams.vpc1SubnetName,
+		}
+		ginkgo.By("Creating pod " + vpcqosParams.vpc1PodName)
+		pod1 := framework.MakePod(f.Namespace.Name, vpcqosParams.vpc1PodName, nil, annotations1, framework.AgnhostImage, iperfServerCmd, nil)
+		pod1 = 
podClient.CreateSync(pod1) + + ginkgo.By("Creating eip " + vpcqosParams.vpc1EIPName) + eip1 := framework.MakeIptablesEIP(vpcqosParams.vpc1EIPName, "", "", "", vpcqosParams.vpcNat1GwName, vpcqosParams.attachDefName, "") + eip1 = iptablesEIPClient.CreateSync(eip1) + + ginkgo.By("Creating fip " + vpcqosParams.vpc1FIPName) + fip1 := framework.MakeIptablesFIPRule(vpcqosParams.vpc1FIPName, vpcqosParams.vpc1EIPName, pod1.Status.PodIP) + _ = iptablesFIPClient.CreateSync(fip1) + + setupVpcNatGwTestEnvironment( + f, dockerExtNetNetwork, attachNetClient, + subnetClient, vpcClient, vpcNatGwClient, + vpcqosParams.vpc2Name, vpcqosParams.vpc2SubnetName, vpcqosParams.vpcNat2GwName, + overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIp, + dockerExtNetName, vpcqosParams.attachDefName, net1NicName, + vpcqosParams.subnetProvider, + true, + ) + + annotations2 := map[string]string{ + util.LogicalSwitchAnnotation: vpcqosParams.vpc2SubnetName, + } + + ginkgo.By("Creating pod " + vpcqosParams.vpc2PodName) + pod3 := framework.MakePod(f.Namespace.Name, vpcqosParams.vpc2PodName, nil, annotations2, framework.AgnhostImage, iperfServerCmd, nil) + pod3 = podClient.CreateSync(pod3) + + ginkgo.By("Creating eip " + vpcqosParams.vpc2EIPName) + eip3 := framework.MakeIptablesEIP(vpcqosParams.vpc2EIPName, "", "", "", vpcqosParams.vpcNat2GwName, vpcqosParams.attachDefName, "") + eip3 = iptablesEIPClient.CreateSync(eip3) + + ginkgo.By("Creating fip " + vpcqosParams.vpc2FIPName) + fip3 := framework.MakeIptablesFIPRule(vpcqosParams.vpc2FIPName, vpcqosParams.vpc2EIPName, pod3.Status.PodIP) + _ = iptablesFIPClient.CreateSync(fip3) + + // case 1: set qos policy for natgw + // case 2: rebuild qos when natgw pod restart + defaultQoSCases(f, vpcNatGwClient, podClient, qosPolicyClient, pod1, pod3, eip1, eip3, vpcqosParams.vpcNat1GwName) + // case 1: set qos policy for eip + // case 2: update qos policy for eip + // case 3: change qos policy of eip + // case 4: rebuild qos when natgw pod restart + eipQoSCases(f, iptablesEIPClient, podClient, qosPolicyClient, pod1, pod3, eip1, eip3, vpcqosParams.vpc1EIPName, vpcqosParams.vpcNat1GwName) + // case 1: set specific ip qos policy for natgw + specifyingIPQoSCases(f, vpcNatGwClient, qosPolicyClient, pod1, pod3, eip1, eip3, vpcqosParams.vpcNat1GwName) + // case 1: test qos match priority + // case 2: change qos policy of natgw + priorityQoSCases(f, vpcNatGwClient, iptablesEIPClient, qosPolicyClient, pod1, pod3, eip1, eip3, vpcqosParams.vpcNat1GwName, vpcqosParams.vpc1EIPName) + // case 1: test qos when create natgw with qos policy + // case 2: test qos when create eip with qos policy + createNatGwAndSetQosCases(f, + vpcNatGwClient, ipClient, iptablesEIPClient, iptablesFIPClient, + subnetClient, qosPolicyClient, pod1, pod3, eip3, vpcqosParams.vpcNat1GwName, + vpcqosParams.vpc1EIPName, vpcqosParams.vpc1FIPName, vpcqosParams.vpc1Name, + vpcqosParams.vpc1SubnetName, lanIp, vpcqosParams.attachDefName) + + ginkgo.By("Deleting fip " + vpcqosParams.vpc1FIPName) + iptablesFIPClient.DeleteSync(vpcqosParams.vpc1FIPName) + + ginkgo.By("Deleting fip " + vpcqosParams.vpc2FIPName) + iptablesFIPClient.DeleteSync(vpcqosParams.vpc2FIPName) + + ginkgo.By("Deleting eip " + vpcqosParams.vpc1EIPName) + iptablesEIPClient.DeleteSync(vpcqosParams.vpc1EIPName) + + ginkgo.By("Deleting eip " + vpcqosParams.vpc2EIPName) + iptablesEIPClient.DeleteSync(vpcqosParams.vpc2EIPName) + + ginkgo.By("Deleting pod " + vpcqosParams.vpc1PodName) + podClient.DeleteSync(vpcqosParams.vpc1PodName) + + ginkgo.By("Deleting pod " + 
vpcqosParams.vpc2PodName)
+		podClient.DeleteSync(vpcqosParams.vpc2PodName)
+
+		ginkgo.By("Deleting custom vpc " + vpcqosParams.vpc1Name)
+		vpcClient.DeleteSync(vpcqosParams.vpc1Name)
+
+		ginkgo.By("Deleting custom vpc " + vpcqosParams.vpc2Name)
+		vpcClient.DeleteSync(vpcqosParams.vpc2Name)
+
+		ginkgo.By("Deleting custom vpc nat gw " + vpcqosParams.vpcNat1GwName)
+		vpcNatGwClient.DeleteSync(vpcqosParams.vpcNat1GwName)
+
+		ginkgo.By("Deleting custom vpc nat gw " + vpcqosParams.vpcNat2GwName)
+		vpcNatGwClient.DeleteSync(vpcqosParams.vpcNat2GwName)
+
+		// the only pod for vpc nat gateway
+		vpcNatGw1PodName := "vpc-nat-gw-" + vpcqosParams.vpcNat1GwName + "-0"
+
+		// delete vpc nat gw statefulset remaining ip for eth0 and net1
+		overlaySubnet1 := subnetClient.Get(vpcqosParams.vpc1SubnetName)
+		macvlanSubnet := subnetClient.Get(vpcqosParams.attachDefName)
+		eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider)
+		net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
+		ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
+		ipClient.DeleteSync(eth0IpName)
+		ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
+		ipClient.DeleteSync(net1IpName)
+		ginkgo.By("Deleting overlay subnet " + vpcqosParams.vpc1SubnetName)
+		subnetClient.DeleteSync(vpcqosParams.vpc1SubnetName)
+
+		vpcNatGw2PodName := "vpc-nat-gw-" + vpcqosParams.vpcNat2GwName + "-0"
+		overlaySubnet2 := subnetClient.Get(vpcqosParams.vpc2SubnetName)
+		eth0IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, overlaySubnet2.Spec.Provider)
+		net1IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
+		ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
+		ipClient.DeleteSync(eth0IpName)
+		ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
+		ipClient.DeleteSync(net1IpName)
+		ginkgo.By("Deleting overlay subnet " + vpcqosParams.vpc2SubnetName)
+		subnetClient.DeleteSync(vpcqosParams.vpc2SubnetName)
+
+		ginkgo.By("Deleting macvlan underlay subnet " + vpcqosParams.attachDefName)
+		subnetClient.DeleteSync(vpcqosParams.attachDefName)
+	})
+})
+
 func init() {
 	klog.SetOutput(ginkgo.GinkgoWriter)