UPSTREAM: <carry>: Create minimal wrapper needed to run k8s e2e tests
soltysh authored and bertinatto committed Jun 9, 2023
1 parent 4a9bfa8 commit b240a39
Showing 3 changed files with 20 additions and 17 deletions.
openshift-hack/cmd/k8s-tests/provider.go (2 changes: 1 addition & 1 deletion)
@@ -101,7 +101,7 @@ func initializeTestFramework(provider string) error {

// Ensure that Kube tests run privileged (like they do upstream)
testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) {
- return e2e.CreateTestingNS(baseName, c, labels, true)
+ return e2e.CreateTestingNS(ctx, baseName, c, labels, true)
}

gomega.RegisterFailHandler(ginkgo.Fail)
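This hunk is representative of the whole commit: Kubernetes 1.27 threaded a context.Context through the e2e framework, so the wrapper now forwards the caller's ctx instead of minting its own. Below is a minimal, self-contained sketch of that pattern; createNamespace is a hypothetical stand-in, not a function from this repository.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// createNamespace is a hypothetical helper showing the pattern applied
// throughout this commit: accept the caller's ctx and forward it, rather
// than calling context.Background() internally, so test-framework
// cancellation and deadlines propagate into every API call.
func createNamespace(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // the caller cancelled or timed out
	case <-time.After(10 * time.Millisecond): // stand-in for an API round trip
		fmt.Println("created namespace", name)
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := createNamespace(ctx, "e2e-demo"); err != nil {
		fmt.Println("error:", err)
	}
}
```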
openshift-hack/e2e/annotate/rules.go (5 changes: 4 additions & 1 deletion)
@@ -34,7 +34,6 @@ var (
`\[Feature:GPUDevicePlugin\]`,
`\[sig-scheduling\] GPUDevicePluginAcrossRecreate \[Feature:Recreate\]`,

- `\[Feature:ImageQuota\]`, // Quota isn't turned on by default, we should do that and then reenable these tests
`\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset?
`\[sig-cloud-provider-gcp\]`, // these test require a different configuration - note that GCE tests from the sig-cluster-lifecycle were moved to the sig-cloud-provider-gcpcluster lifecycle see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb

@@ -151,6 +150,10 @@ var (
`DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy`,
`\[sig-network\] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod`, // TODO(network): simple test in k8s 1.27, needs investigation
`\[sig-cli\] Kubectl client Kubectl prune with applyset should apply and prune objects`, // TODO(workloads): alpha feature in k8s 1.27. It's failing with `error: unknown flag: --applyset`. Needs investigation
+
+ // https://issues.redhat.com/browse/OCPBUGS-13392
+ `\[sig-network\] NetworkPolicyLegacy \[LinuxOnly\] NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`,
+ `\[sig-network\] NetworkPolicyLegacy \[LinuxOnly\] NetworkPolicy between server and client should enforce updated policy`,
},
// tests that may work, but we don't support them
"[Disabled:Unsupported]": {
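The rules above are entries in a map from annotation label (e.g. [Disabled:Broken]) to regular-expression patterns; any test whose full Ginkgo name matches a pattern gets tagged with that label. A hedged sketch of such matching logic follows — the map literal and annotate function here are illustrative, not the annotate package's actual API.

```go
package main

import (
	"fmt"
	"regexp"
)

// rules is an illustrative map from annotation label to regexp patterns,
// shaped like the rules.go snippet above.
var rules = map[string][]string{
	"[Disabled:Broken]": {
		`\[sig-network\] NetworkPolicyLegacy \[LinuxOnly\] NetworkPolicy between server and client should enforce updated policy`,
	},
}

// annotate returns the first label whose patterns match the full test name,
// or "" if the test is not annotated. Matching order is an assumption here.
func annotate(testName string) string {
	for label, patterns := range rules {
		for _, p := range patterns {
			if regexp.MustCompile(p).MatchString(testName) {
				return label
			}
		}
	}
	return ""
}

func main() {
	name := `[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce updated policy`
	fmt.Println(annotate(name)) // [Disabled:Broken]
}
```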
openshift-hack/e2e/namespace.go (30 changes: 15 additions & 15 deletions)
@@ -23,7 +23,7 @@ import (
)

// CreateTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs
- func CreateTestingNS(baseName string, c kclientset.Interface, labels map[string]string, isKubeNamespace bool) (*corev1.Namespace, error) {
+ func CreateTestingNS(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string, isKubeNamespace bool) (*corev1.Namespace, error) {
if !strings.HasPrefix(baseName, "e2e-") {
baseName = "e2e-" + baseName
}
@@ -35,7 +35,7 @@ func CreateTestingNS(baseName string, c kclientset.Interface, labels map[string]
labels["security.openshift.io/disable-securitycontextconstraints"] = "true"
}

- ns, err := framework.CreateTestingNS(context.Background(), baseName, c, labels)
+ ns, err := framework.CreateTestingNS(ctx, baseName, c, labels)
if err != nil {
return ns, err
}
@@ -57,37 +57,37 @@ func CreateTestingNS(baseName string, c kclientset.Interface, labels map[string]
framework.Logf("About to run a Kube e2e test, ensuring namespace/%s is privileged", ns.Name)
// add the "privileged" scc to ensure pods that explicitly
// request extra capabilities are not rejected
- addE2EServiceAccountsToSCC(securityClient, []corev1.Namespace{*ns}, "privileged")
+ addE2EServiceAccountsToSCC(ctx, securityClient, []corev1.Namespace{*ns}, "privileged")
// add the "anyuid" scc to ensure pods that don't specify a
// uid don't get forced into a range (mimics upstream
// behavior)
- addE2EServiceAccountsToSCC(securityClient, []corev1.Namespace{*ns}, "anyuid")
+ addE2EServiceAccountsToSCC(ctx, securityClient, []corev1.Namespace{*ns}, "anyuid")
// add the "hostmount-anyuid" scc to ensure pods using hostPath
// can execute tests
- addE2EServiceAccountsToSCC(securityClient, []corev1.Namespace{*ns}, "hostmount-anyuid")
+ addE2EServiceAccountsToSCC(ctx, securityClient, []corev1.Namespace{*ns}, "hostmount-anyuid")

// The intra-pod test requires that the service account have
// permission to retrieve service endpoints.
rbacClient, err := rbacv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
- addRoleToE2EServiceAccounts(rbacClient, []corev1.Namespace{*ns}, "view")
+ addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "view")

// in practice too many kube tests ignore scheduling constraints
- allowAllNodeScheduling(c, ns.Name)
+ allowAllNodeScheduling(ctx, c, ns.Name)

return ns, err
}

var longRetry = wait.Backoff{Steps: 100}

// TODO: ideally this should be rewritten to use dynamic client, not to rely on openshift types
- func addE2EServiceAccountsToSCC(securityClient securityv1client.Interface, namespaces []corev1.Namespace, sccName string) {
+ func addE2EServiceAccountsToSCC(ctx context.Context, securityClient securityv1client.Interface, namespaces []corev1.Namespace, sccName string) {
// Because updates can race, we need to set the backoff retries to be > than the number of possible
// parallel jobs starting at once. Set very high to allow future high parallelism.
err := retry.RetryOnConflict(longRetry, func() error {
- scc, err := securityClient.SecurityV1().SecurityContextConstraints().Get(context.Background(), sccName, metav1.GetOptions{})
+ scc, err := securityClient.SecurityV1().SecurityContextConstraints().Get(ctx, sccName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
return nil
@@ -98,7 +98,7 @@ func addE2EServiceAccountsToSCC(securityClient securityv1client.Interface, names
for _, ns := range namespaces {
scc.Groups = append(scc.Groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
}
- if _, err := securityClient.SecurityV1().SecurityContextConstraints().Update(context.Background(), scc, metav1.UpdateOptions{}); err != nil {
+ if _, err := securityClient.SecurityV1().SecurityContextConstraints().Update(ctx, scc, metav1.UpdateOptions{}); err != nil {
return err
}
return nil
@@ -114,11 +114,11 @@ func fatalErr(msg interface{}) {
framework.Failf("%v", msg)
}

- func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namespaces []corev1.Namespace, roleName string) {
+ func addRoleToE2EServiceAccounts(ctx context.Context, rbacClient rbacv1client.RbacV1Interface, namespaces []corev1.Namespace, roleName string) {
err := retry.RetryOnConflict(longRetry, func() error {
for _, ns := range namespaces {
if ns.Status.Phase != corev1.NamespaceTerminating {
- _, err := rbacClient.RoleBindings(ns.Name).Create(context.Background(), &rbacv1.RoleBinding{
+ _, err := rbacClient.RoleBindings(ns.Name).Create(ctx, &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
@@ -141,17 +141,17 @@ func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namesp
}

// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto.
- func allowAllNodeScheduling(c kclientset.Interface, namespace string) {
+ func allowAllNodeScheduling(ctx context.Context, c kclientset.Interface, namespace string) {
err := retry.RetryOnConflict(longRetry, func() error {
- ns, err := c.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
+ ns, err := c.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
if err != nil {
return err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
ns.Annotations[projectv1.ProjectNodeSelector] = ""
- _, err = c.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{})
+ _, err = c.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{})
return err
})
if err != nil {
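Every helper in this file wraps its write in retry.RetryOnConflict with a 100-step backoff, because many parallel test jobs may update the same SCC or namespace at once; each attempt re-reads the object so a conflict is retried against fresh state. Below is a compressed, runnable sketch of that read-modify-write loop against a fake clientset — the annotation key mirrors projectv1.ProjectNodeSelector, assumed here to be "openshift.io/node-selector".

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/util/retry"
)

// Mirrors the pattern in namespace.go: a high-step backoff so racing
// parallel jobs all eventually complete a clean read-modify-write.
var longRetry = wait.Backoff{Steps: 100}

func main() {
	ctx := context.Background()
	c := fake.NewSimpleClientset(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: "e2e-demo"},
	})

	err := retry.RetryOnConflict(longRetry, func() error {
		// Re-read on every attempt so a conflict retries against fresh state.
		ns, err := c.CoreV1().Namespaces().Get(ctx, "e2e-demo", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if ns.Annotations == nil {
			ns.Annotations = map[string]string{}
		}
		// Assumed value of projectv1.ProjectNodeSelector: an empty selector
		// lets pods in this namespace schedule onto all nodes.
		ns.Annotations["openshift.io/node-selector"] = ""
		_, err = c.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{})
		return err
	})
	fmt.Println("update err:", err)
}
```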
