Skip to content

Commit

Permalink
e2e: remove unused functions and variables
Browse files Browse the repository at this point in the history
Signed-off-by: Talor Itzhak <titzhak@redhat.com>
  • Loading branch information
Tal-or committed Apr 24, 2023
1 parent b627fcd commit 56bb427
Show file tree
Hide file tree
Showing 3 changed files with 2 additions and 60 deletions.
Expand Up @@ -37,10 +37,6 @@ import (
var workerRTNode *corev1.Node
var profile *performancev2.PerformanceProfile

const (
sysDevicesOnlineCPUs = "/sys/devices/system/cpu/online"
)

var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
var balanceIsolated bool
var reservedCPU, isolatedCPU string
Expand Down Expand Up @@ -599,7 +595,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
// any random existing cpu is fine
cpuID := onlineCPUSet.ToSliceNoSort()[0]
smtLevel := nodes.GetSMTLevel(cpuID, workerRTNode)
hasWP := checkForWorkloadPartitioning(workerRTNode)
hasWP := checkForWorkloadPartitioning()

// Following checks are required to map test_id scenario correctly to the type of node under test
if snoCluster && !RunningOnSingleNode {
Expand Down Expand Up @@ -644,7 +640,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {

})

func checkForWorkloadPartitioning(workerNode *corev1.Node) bool {
func checkForWorkloadPartitioning() bool {
// Look for the correct Workload Partition annotation in
// a crio configuration file on the target node
By("Check for Workload Partitioning enabled")
Expand Down
15 changes: 0 additions & 15 deletions test/e2e/performanceprofile/functests/1_performance/irqbalance.go
Expand Up @@ -354,21 +354,6 @@ func findIrqBalanceBannedCPUsVarFromConf(conf string) string {
return ""
}

// makeBackupForFile copies the file at path (as seen under the node's
// /rootfs mount) to a ".save" sibling on the same node, and returns a
// closure that restores the original by moving the backup back into place.
// Both the copy and the restore fail the ginkgo spec on error.
func makeBackupForFile(node *corev1.Node, path string) func() {
	target := filepath.Join("/", "rootfs", path)
	backup := target + ".save"

	cpOut, cpErr := nodes.ExecCommandOnNode([]string{"/usr/bin/cp", "-v", target, backup}, node)
	// Offset 1 so a failure is reported at the caller's line, not here.
	ExpectWithOffset(1, cpErr).ToNot(HaveOccurred())
	fmt.Fprintf(GinkgoWriter, "%s", cpOut)

	restore := func() {
		mvOut, mvErr := nodes.ExecCommandOnNode([]string{"/usr/bin/mv", "-v", backup, target}, node)
		Expect(mvErr).ToNot(HaveOccurred())
		fmt.Fprintf(GinkgoWriter, "%s", mvOut)
	}
	return restore
}

func pickNodeIdx(nodes []corev1.Node) int {
name, ok := os.LookupEnv("E2E_PAO_TARGET_NODE")
if !ok {
Expand Down
39 changes: 0 additions & 39 deletions test/e2e/performanceprofile/functests/utils/tuned/tuned.go
Expand Up @@ -24,45 +24,6 @@ import (
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes"
)

// WaitForAppliedCondition polls once per second, up to timeout, until every
// tuned Profile named in tunedProfileNames reports a TunedProfileApplied
// condition whose status equals conditionStatus. It returns nil once all
// profiles match, or the poll's timeout error otherwise.
func WaitForAppliedCondition(tunedProfileNames []string, conditionStatus corev1.ConditionStatus, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		for _, tunedProfileName := range tunedProfileNames {
			profile := &tunedv1.Profile{}
			key := types.NamespacedName{
				Name:      tunedProfileName,
				Namespace: components.NamespaceNodeTuningOperator,
			}

			// A failed lookup is logged and retried on the next poll tick
			// ((false, nil) keeps polling) rather than aborting the wait.
			if err := testclient.Client.Get(context.TODO(), key, profile); err != nil {
				klog.Errorf("failed to get tuned profile %q: %v", tunedProfileName, err)
				return false, nil
			}

			appliedCondition, err := GetConditionByType(profile.Status.Conditions, tunedv1.TunedProfileApplied)
			if err != nil {
				klog.Errorf("failed to get applied condition for profile %q: %v", tunedProfileName, err)
				return false, nil
			}

			// Any profile not yet in the requested status means keep waiting.
			if appliedCondition.Status != conditionStatus {
				return false, nil
			}
		}

		// Every requested profile reached the desired applied status.
		return true, nil
	})
}

// GetConditionByType returns a pointer to the first condition in conditions
// whose Type equals conditionType. It returns an error when no condition of
// that type is present.
func GetConditionByType(conditions []tunedv1.ProfileStatusCondition, conditionType tunedv1.ProfileConditionType) (*tunedv1.ProfileStatusCondition, error) {
	for i := range conditions {
		// Take the address of the slice element, not of the loop variable,
		// so the returned pointer refers to the caller's data.
		c := &conditions[i]
		if c.Type == conditionType {
			return c, nil
		}
	}
	// Fixed message: the original said "failed to found applied condition",
	// which was ungrammatical and hard-coded "applied" even though the
	// condition type is a parameter.
	return nil, fmt.Errorf("condition %q not found under conditions %v", conditionType, conditions)
}

func GetPod(ctx context.Context, node *corev1.Node) (*corev1.Pod, error) {
podList := &corev1.PodList{}
opts := &client.ListOptions{
Expand Down

0 comments on commit 56bb427

Please sign in to comment.