Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

e2e: overhaul Performance-Addon-Operator tests #590

Merged
merged 8 commits into from Jul 4, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
36 changes: 21 additions & 15 deletions test/e2e/performanceprofile/functests/0_config/config.go
Expand Up @@ -3,12 +3,12 @@ package __performance_config
import (
"context"
"fmt"
"io/ioutil"
"os"
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/format"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -57,7 +57,7 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, fun
deploymentSpecs := csv.Spec.InstallStrategy.StrategySpec.DeploymentSpecs
if deploymentSpecs != nil {
for _, deployment := range deploymentSpecs {
Expect((deployment.Name)).ToNot(Equal("performance-operator"), fmt.Sprintf("CSV %s for performance-operator should have been removed", csv.Name))
Expect(deployment.Name).ToNot(Equal("performance-operator"), fmt.Sprintf("CSV %s for performance-operator should have been removed", csv.Name))
}
}
}
Expand Down Expand Up @@ -128,26 +128,32 @@ var _ = Describe("[performance][config] Performance configuration", Ordered, fun
By("Waiting for MCP being updated")
mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)

Expect(testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(performanceProfile), performanceProfile))
By("Printing the updated profile")
format.Object(performanceProfile, 2)
})

})

func externalPerformanceProfile(performanceManifest string) (*performancev2.PerformanceProfile, error) {
performanceScheme := runtime.NewScheme()
performancev2.AddToScheme(performanceScheme)
err := performancev2.AddToScheme(performanceScheme)
if err != nil {
return nil, err
}

decode := serializer.NewCodecFactory(performanceScheme).UniversalDeserializer().Decode
manifest, err := ioutil.ReadFile(performanceManifest)
manifest, err := os.ReadFile(performanceManifest)
if err != nil {
return nil, fmt.Errorf("Failed to read %s file", performanceManifest)
return nil, fmt.Errorf("failed to read %s file", performanceManifest)
}
obj, _, err := decode([]byte(manifest), nil, nil)
obj, _, err := decode(manifest, nil, nil)
if err != nil {
return nil, fmt.Errorf("Failed to read the manifest file %s", performanceManifest)
return nil, fmt.Errorf("failed to read the manifest file %s", performanceManifest)
}
profile, ok := obj.(*performancev2.PerformanceProfile)
if !ok {
return nil, fmt.Errorf("Failed to convert manifest file to profile")
return nil, fmt.Errorf("failed to convert manifest file to profile")
}
return profile, nil
}
Expand Down Expand Up @@ -176,7 +182,7 @@ func testProfile() *performancev2.PerformanceProfile {
{
Size: "1G",
Count: 1,
Node: pointer.Int32Ptr(0),
Node: pointer.Int32(0),
},
{
Size: "2M",
Expand All @@ -186,18 +192,18 @@ func testProfile() *performancev2.PerformanceProfile {
},
NodeSelector: testutils.NodeSelectorLabels,
RealTimeKernel: &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
},
NUMA: &performancev2.NUMA{
TopologyPolicy: pointer.StringPtr("single-numa-node"),
TopologyPolicy: pointer.String("single-numa-node"),
},
Net: &performancev2.Net{
UserLevelNetworking: pointer.BoolPtr(true),
UserLevelNetworking: pointer.Bool(true),
},
WorkloadHints: &performancev2.WorkloadHints{
RealTime: pointer.BoolPtr(true),
HighPowerConsumption: pointer.BoolPtr(false),
PerPodPowerManagement: pointer.BoolPtr(false),
RealTime: pointer.Bool(true),
HighPowerConsumption: pointer.Bool(false),
PerPodPowerManagement: pointer.Bool(false),
},
},
}
Expand Down
Expand Up @@ -37,10 +37,6 @@ import (
var workerRTNode *corev1.Node
var profile *performancev2.PerformanceProfile

const (
sysDevicesOnlineCPUs = "/sys/devices/system/cpu/online"
)

var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
var balanceIsolated bool
var reservedCPU, isolatedCPU string
Expand Down Expand Up @@ -68,7 +64,9 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("Checking the profile %s with cpus %s", profile.Name, cpuSpecToString(profile.Spec.CPU)))
cpus, err := cpuSpecToString(profile.Spec.CPU)
Expect(err).ToNot(HaveOccurred(), "failed to parse cpu %v spec to string", cpus)
By(fmt.Sprintf("Checking the profile %s with cpus %s", profile.Name, cpus))
balanceIsolated = true
if profile.Spec.CPU.BalanceIsolated != nil {
balanceIsolated = *profile.Spec.CPU.BalanceIsolated
Expand Down Expand Up @@ -319,7 +317,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
containerCgroup, err = nodes.ExecCommandOnNode(cmd, workerRTNode)
Expect(err).ToNot(HaveOccurred())
return containerCgroup
}, (cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)), 5*time.Second).ShouldNot(BeEmpty(),
}).WithTimeout(cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)).WithPolling(5*time.Second).ShouldNot(BeEmpty(),
fmt.Sprintf("cannot find cgroup for container %q", containerID))

By("Checking what CPU the pod is using")
Expand Down Expand Up @@ -434,7 +432,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
}
}
return true
}, (cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)), 5*time.Second).Should(BeTrue(),
}).WithTimeout(cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)).WithPolling(5*time.Second).Should(BeTrue(),
fmt.Sprintf("IRQ still active on CPU%s", psr))

By("Checking that after removing POD default smp affinity is returned back to all active CPUs")
Expand All @@ -453,7 +451,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
testpod = pods.GetTestPod()
testpod.Namespace = testutils.NamespaceTesting
testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
testpod.Spec.ShareProcessNamespace = pointer.BoolPtr(true)
testpod.Spec.ShareProcessNamespace = pointer.Bool(true)

err := testclient.Client.Create(context.TODO(), testpod)
Expect(err).ToNot(HaveOccurred())
Expand Down Expand Up @@ -597,7 +595,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
// any random existing cpu is fine
cpuID := onlineCPUSet.ToSliceNoSort()[0]
smtLevel := nodes.GetSMTLevel(cpuID, workerRTNode)
hasWP := checkForWorkloadPartitioning(workerRTNode)
hasWP := checkForWorkloadPartitioning()

// Following checks are required to map test_id scenario correctly to the type of node under test
if snoCluster && !RunningOnSingleNode {
Expand Down Expand Up @@ -642,7 +640,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {

})

func checkForWorkloadPartitioning(workerNode *corev1.Node) bool {
func checkForWorkloadPartitioning() bool {
// Look for the correct Workload Partition annotation in
// a crio configuration file on the target node
By("Check for Workload Partitioning enabled")
Expand Down Expand Up @@ -832,21 +830,30 @@ func deleteTestPod(testpod *corev1.Pod) {
Expect(err).ToNot(HaveOccurred())
}

func cpuSpecToString(cpus *performancev2.CPU) string {
func cpuSpecToString(cpus *performancev2.CPU) (string, error) {
Tal-or marked this conversation as resolved.
Show resolved Hide resolved
if cpus == nil {
return "<nil>"
return "", fmt.Errorf("performance CPU field is nil")
}
sb := strings.Builder{}
if cpus.Reserved != nil {
fmt.Fprintf(&sb, "reserved=[%s]", *cpus.Reserved)
_, err := fmt.Fprintf(&sb, "reserved=[%s]", *cpus.Reserved)
if err != nil {
return "", err
}
}
if cpus.Isolated != nil {
fmt.Fprintf(&sb, " isolated=[%s]", *cpus.Isolated)
_, err := fmt.Fprintf(&sb, " isolated=[%s]", *cpus.Isolated)
if err != nil {
return "", err
}
}
if cpus.BalanceIsolated != nil {
fmt.Fprintf(&sb, " balanceIsolated=%t", *cpus.BalanceIsolated)
_, err := fmt.Fprintf(&sb, " balanceIsolated=%t", *cpus.BalanceIsolated)
if err != nil {
return "", err
}
}
return sb.String()
return sb.String(), nil
}

func logEventsForPod(testPod *corev1.Pod) {
Expand Down
15 changes: 0 additions & 15 deletions test/e2e/performanceprofile/functests/1_performance/irqbalance.go
Expand Up @@ -352,21 +352,6 @@ func findIrqBalanceBannedCPUsVarFromConf(conf string) string {
return ""
}

// makeBackupForFile copies the file at path (resolved under /rootfs on the
// target node) to a ".save" sibling via the node's /usr/bin/cp, asserting
// that the copy succeeds. It returns a restore function that moves the
// backup back over the original with /usr/bin/mv; both commands' output is
// echoed to the GinkgoWriter for test logs.
func makeBackupForFile(node *corev1.Node, path string) func() {
	original := filepath.Join("/", "rootfs", path)
	backup := original + ".save"

	output, err := nodes.ExecCommandOnNode([]string{"/usr/bin/cp", "-v", original, backup}, node)
	// Offset 1 so a failure is reported at the caller's line, not here.
	ExpectWithOffset(1, err).ToNot(HaveOccurred())
	fmt.Fprintf(GinkgoWriter, "%s", output)

	// Restore closure: move the saved copy back into place on the same node.
	return func() {
		output, err := nodes.ExecCommandOnNode([]string{"/usr/bin/mv", "-v", backup, original}, node)
		Expect(err).ToNot(HaveOccurred())
		fmt.Fprintf(GinkgoWriter, "%s", output)
	}
}

func pickNodeIdx(nodes []corev1.Node) int {
name, ok := os.LookupEnv("E2E_PAO_TARGET_NODE")
if !ok {
Expand Down
26 changes: 13 additions & 13 deletions test/e2e/performanceprofile/functests/1_performance/performance.go
Expand Up @@ -147,7 +147,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
re := regexp.MustCompile(`tuned.non_isolcpus=\S+`)
nonIsolcpusFullArgument := re.FindString(string(cmdline))
Expect(nonIsolcpusFullArgument).To(ContainSubstring("tuned.non_isolcpus="), "tuned.non_isolcpus parameter not found in %q", cmdline)
nonIsolcpusMask := strings.Split(string(nonIsolcpusFullArgument), "=")[1]
nonIsolcpusMask := strings.Split(nonIsolcpusFullArgument, "=")[1]
nonIsolcpusMaskNoDelimiters := strings.Replace(nonIsolcpusMask, ",", "", -1)

getTrimmedMaskFromData := func(maskType string, data []byte) string {
Expand Down Expand Up @@ -462,10 +462,10 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
},
NodeSelector: map[string]string{newLabel: ""},
RealTimeKernel: &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
},
NUMA: &performancev2.NUMA{
TopologyPolicy: pointer.StringPtr("restricted"),
TopologyPolicy: pointer.String("restricted"),
},
},
}
Expand Down Expand Up @@ -558,13 +558,13 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
},
NodeSelector: map[string]string{newLabel: ""},
RealTimeKernel: &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
},
AdditionalKernelArgs: []string{
"NEW_ARGUMENT",
},
NUMA: &performancev2.NUMA{
TopologyPolicy: pointer.StringPtr("restricted"),
TopologyPolicy: pointer.String("restricted"),
},
},
}
Expand Down Expand Up @@ -786,7 +786,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
profile.Name = testProfileName
profile.ResourceVersion = ""
profile.Spec.NodeSelector = map[string]string{"test/test": "test"}
profile.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(globallyDisableIrqLoadBalancing)
profile.Spec.GloballyDisableIrqLoadBalancing = pointer.Bool(globallyDisableIrqLoadBalancing)
profile.Spec.MachineConfigPoolSelector = nil
profile.Spec.MachineConfigLabel = nil

Expand Down Expand Up @@ -907,11 +907,11 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
},
Spec: performancev1alpha1.PerformanceProfileSpec{
RealTimeKernel: &performancev1alpha1.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
},
NodeSelector: map[string]string{"v1alpha1/v1alpha1": "v1alpha1"},
NUMA: &performancev1alpha1.NUMA{
TopologyPolicy: pointer.StringPtr("restricted"),
TopologyPolicy: pointer.String("restricted"),
},
},
}
Expand Down Expand Up @@ -966,11 +966,11 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
},
Spec: performancev1.PerformanceProfileSpec{
RealTimeKernel: &performancev1.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
},
NodeSelector: map[string]string{"v1/v1": "v1"},
NUMA: &performancev1.NUMA{
TopologyPolicy: pointer.StringPtr("restricted"),
TopologyPolicy: pointer.String("restricted"),
},
},
}
Expand Down Expand Up @@ -1025,11 +1025,11 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() {
},
Spec: performancev2.PerformanceProfileSpec{
RealTimeKernel: &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
},
NodeSelector: map[string]string{"v2/v2": "v2"},
NUMA: &performancev2.NUMA{
TopologyPolicy: pointer.StringPtr("restricted"),
TopologyPolicy: pointer.String("restricted"),
},
},
}
Expand Down Expand Up @@ -1241,7 +1241,7 @@ func verifyV2Conversion(v2Profile *performancev2.PerformanceProfile, v1Profile *
}
if specCPU.Offlined != nil {
if string(*specCPU.Offlined) != string(*v1Profile.Spec.CPU.Offlined) {
return fmt.Errorf("Offlined CPUs are different [v2: %s, v1: %s]",
return fmt.Errorf("offlined CPUs are different [v2: %s, v1: %s]",
*specCPU.Offlined, *v1Profile.Spec.CPU.Offlined)
}
}
Expand Down
Expand Up @@ -122,7 +122,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance

By("Modifying profile")
profile.Spec.CPU = &performancev2.CPU{
BalanceIsolated: pointer.BoolPtr(false),
BalanceIsolated: pointer.Bool(false),
Reserved: &reserved,
Isolated: &isolated,
}
Expand All @@ -132,17 +132,17 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
{
Count: hpCntOnNuma0,
Size: hpSize2M,
Node: pointer.Int32Ptr(0),
Node: pointer.Int32(0),
},
{
Count: hpCntOnNuma1,
Size: hpSize2M,
Node: pointer.Int32Ptr(1),
Node: pointer.Int32(1),
},
},
}
profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(true),
Enabled: pointer.Bool(true),
}

By("Verifying that mcp is ready for update")
Expand Down Expand Up @@ -230,15 +230,15 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
},
}
profile.Spec.CPU = &performancev2.CPU{
BalanceIsolated: pointer.BoolPtr(false),
BalanceIsolated: pointer.Bool(false),
Reserved: &reserved,
Isolated: &isolated,
}
profile.Spec.NUMA = &performancev2.NUMA{
TopologyPolicy: &policy,
}
profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(false),
Enabled: pointer.Bool(false),
}

if profile.Spec.AdditionalKernelArgs == nil {
Expand Down Expand Up @@ -864,7 +864,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
totalCpus := fmt.Sprintf("%s,%s", reservedCpus, isolatedCpus)
totalCpuSlice := strings.Split(totalCpus, ",")
// get partial cpus from the combined cpus
partialCpulist := (totalCpuSlice[:len(totalCpuSlice)/2])
partialCpulist := totalCpuSlice[:len(totalCpuSlice)/2]
offlineCpus := strings.Join(partialCpulist, ",")
// Create new performance with offlined
reservedSet := performancev2.CPUSet(reservedCpus)
Expand Down Expand Up @@ -1137,13 +1137,13 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance
It("[test_id:54191]Verify RPS Mask is not applied when RealtimeHint is disabled", func() {
By("Modifying profile")
profile.Spec.WorkloadHints = &performancev2.WorkloadHints{
HighPowerConsumption: pointer.BoolPtr(false),
RealTime: pointer.BoolPtr(false),
PerPodPowerManagement: pointer.BoolPtr(false),
HighPowerConsumption: pointer.Bool(false),
RealTime: pointer.Bool(false),
PerPodPowerManagement: pointer.Bool(false),
}

profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{
Enabled: pointer.BoolPtr(false),
Enabled: pointer.Bool(false),
}
By("Updating the performance profile")
profiles.UpdateWithRetry(profile)
Expand Down