diff --git a/openshift/tests-extension/Makefile b/openshift/tests-extension/Makefile index 429773a3d..5bc982051 100644 --- a/openshift/tests-extension/Makefile +++ b/openshift/tests-extension/Makefile @@ -78,6 +78,7 @@ pkg/bindata/catalog/catalog.go: $(shell find testdata/catalog -type f) mkdir -p $(@D) $(GO_BINDATA) -pkg catalog -o $@ -prefix "testdata/catalog" testdata/catalog/... go fmt ./$(@D)/... + bindata: pkg/bindata/webhook/bundle/bundle.go pkg/bindata/webhook/bundle/bundle.go: $(shell find testdata/webhook/bundle -type f) mkdir -p $(@D) diff --git a/openshift/tests-extension/pkg/bindata/catalog/catalog.go b/openshift/tests-extension/pkg/bindata/catalog/catalog.go index 80456efdc..b83d5fc0b 100644 --- a/openshift/tests-extension/pkg/bindata/catalog/catalog.go +++ b/openshift/tests-extension/pkg/bindata/catalog/catalog.go @@ -94,7 +94,7 @@ func dockerfile() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "Dockerfile", size: 97, mode: os.FileMode(420), modTime: time.Unix(1755277748, 0)} + info := bindataFileInfo{name: "Dockerfile", size: 97, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -114,7 +114,7 @@ func configsIndexignore() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "configs/.indexignore", size: 4, mode: os.FileMode(420), modTime: time.Unix(1755262416, 0)} + info := bindataFileInfo{name: "configs/.indexignore", size: 4, mode: os.FileMode(420), modTime: time.Unix(1760017176, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -134,7 +134,7 @@ func configsIndexYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "configs/index.yaml", size: 669, mode: os.FileMode(420), modTime: time.Unix(1759944950, 0)} + info := bindataFileInfo{name: "configs/index.yaml", size: 669, mode: os.FileMode(420), modTime: time.Unix(1760123493, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/openshift/tests-extension/pkg/bindata/webhook/bundle/bundle.go b/openshift/tests-extension/pkg/bindata/webhook/bundle/bundle.go index 4e428e368..1acc74d13 100644 --- a/openshift/tests-extension/pkg/bindata/webhook/bundle/bundle.go +++ b/openshift/tests-extension/pkg/bindata/webhook/bundle/bundle.go @@ -102,7 +102,7 @@ func dockerfile() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "Dockerfile", size: 888, mode: os.FileMode(420), modTime: time.Unix(1760064351, 0)} + info := bindataFileInfo{name: "Dockerfile", size: 888, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -122,7 +122,7 @@ func manifestsWebhookOperatorControllerManagerMetricsService_v1_serviceYaml() (* return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator-controller-manager-metrics-service_v1_service.yaml", size: 469, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: "manifests/webhook-operator-controller-manager-metrics-service_v1_service.yaml", size: 469, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -142,7 +142,7 @@ func manifestsWebhookOperatorMetricsReader_rbacAuthorizationK8sIo_v1_clusterrole return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 191, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: 
"manifests/webhook-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 191, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -162,7 +162,7 @@ func manifestsWebhookOperatorWebhookService_v1_serviceYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator-webhook-service_v1_service.yaml", size: 395, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: "manifests/webhook-operator-webhook-service_v1_service.yaml", size: 395, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -182,7 +182,7 @@ func manifestsWebhookOperatorWebhooktestAdminRole_rbacAuthorizationK8sIo_v1_clus return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator-webhooktest-admin-role_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 440, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: "manifests/webhook-operator-webhooktest-admin-role_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 440, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -202,7 +202,7 @@ func manifestsWebhookOperatorWebhooktestEditorRole_rbacAuthorizationK8sIo_v1_clu return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator-webhooktest-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 503, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: "manifests/webhook-operator-webhooktest-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 503, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -222,7 +222,7 @@ func manifestsWebhookOperatorWebhooktestViewerRole_rbacAuthorizationK8sIo_v1_clu return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator-webhooktest-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 460, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: "manifests/webhook-operator-webhooktest-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml", size: 460, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -242,7 +242,7 @@ func manifestsWebhookOperatorClusterserviceversionYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/webhook-operator.clusterserviceversion.yaml", size: 8694, mode: os.FileMode(420), modTime: time.Unix(1760064904, 0)} + info := bindataFileInfo{name: "manifests/webhook-operator.clusterserviceversion.yaml", size: 8694, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -262,7 +262,7 @@ func manifestsWebhookOperatorsCoreosIo_webhooktestsYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/webhook.operators.coreos.io_webhooktests.yaml", size: 12240, mode: os.FileMode(420), modTime: time.Unix(1760064210, 0)} + info := bindataFileInfo{name: "manifests/webhook.operators.coreos.io_webhooktests.yaml", size: 12240, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -282,7 +282,7 @@ func metadataAnnotationsYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: 
"metadata/annotations.yaml", size: 740, mode: os.FileMode(420), modTime: time.Unix(1760064439, 0)} + info := bindataFileInfo{name: "metadata/annotations.yaml", size: 740, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -302,7 +302,7 @@ func testsScorecardConfigYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "tests/scorecard/config.yaml", size: 1614, mode: os.FileMode(420), modTime: time.Unix(1760064391, 0)} + info := bindataFileInfo{name: "tests/scorecard/config.yaml", size: 1614, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/openshift/tests-extension/pkg/bindata/webhook/index/index.go b/openshift/tests-extension/pkg/bindata/webhook/index/index.go index 9064419b2..7eff667c9 100644 --- a/openshift/tests-extension/pkg/bindata/webhook/index/index.go +++ b/openshift/tests-extension/pkg/bindata/webhook/index/index.go @@ -93,7 +93,7 @@ func dockerfile() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "Dockerfile", size: 97, mode: os.FileMode(420), modTime: time.Unix(1760065068, 0)} + info := bindataFileInfo{name: "Dockerfile", size: 97, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -113,7 +113,7 @@ func configsIndexYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "configs/index.yaml", size: 1122, mode: os.FileMode(420), modTime: time.Unix(1760066144, 0)} + info := bindataFileInfo{name: "configs/index.yaml", size: 1122, mode: os.FileMode(420), modTime: time.Unix(1760231456, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/openshift/tests-extension/pkg/helpers/in_cluster_bundles.go b/openshift/tests-extension/pkg/helpers/in_cluster_bundles.go new file mode 100644 index 000000000..e5a3aeb6a --- /dev/null +++ b/openshift/tests-extension/pkg/helpers/in_cluster_bundles.go @@ -0,0 +1,396 @@ +package helpers + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "time" + + //nolint:staticcheck // ST1001: dot-imports for readability + . "github.com/onsi/ginkgo/v2" + //nolint:staticcheck // ST1001: dot-imports for readability + . "github.com/onsi/gomega" + + buildv1 "github.com/openshift/api/build/v1" + imagev1 "github.com/openshift/api/image/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + olmv1 "github.com/operator-framework/operator-controller/api/v1" + + "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/env" +) + +// NewCatalogAndClusterBundles creates bundle and catalog images in-cluster. +// +// The replacements parameter allows callers to control which template variables +// should be replaced. To have this function automatically fill in a value, +// add the key with an empty string value. 
For example: +// +// replacements := map[string]string{ +// "{{ NAMESPACE }}": "", // Will be auto-filled with the generated namespace name +// "{{ TEST-BUNDLE }}": "", // Will be auto-filled with the generated bundle/operator name +// "{{ TEST-CONTROLLER }}": "my-controller:latest", +// } +// +// Supported auto-fill keys: +// - "{{ NAMESPACE }}" - will be filled with the generated namespace name +// - "{{ TEST-BUNDLE }}" - will be filled with the generated bundle/operator name +func NewCatalogAndClusterBundles(ctx SpecContext, replacements map[string]string, + getAssetNamesCatalog func() []string, getAssetCatalog func(string) ([]byte, error), + getAssetNamesBundle func() []string, getAssetBundle func(string) ([]byte, error), +) (string, string, string, string) { + RequireOLMv1CapabilityOnOpenshift() + unique := rand.String(8) + nsName := "install-test-ns-" + unique + ccName := "install-test-cc-" + unique + opName := "install-test-op-" + unique + rbName := "install-test-rb-" + unique + + By(fmt.Sprintf("setting a unique value: %q", unique)) + + // Auto-fill empty values in replacements map based on key patterns + // This allows callers to control which variables they want to use + for key, value := range replacements { + if value == "" { + // Check common patterns for namespace + if key == "{{ NAMESPACE }}" { + replacements[key] = nsName + } + // Check for bundle/operator name + if key == "{{ TEST-BUNDLE }}" { + replacements[key] = opName + } + // Future: could add more auto-fill patterns here + } + } + + By("creating a new Namespace") + createNamespace(nsName) + + // The builder (and deployer) service accounts are created by OpenShift itself which injects them in the NS. + By(fmt.Sprintf("waiting for builder serviceaccount in %s", nsName)) + ExpectServiceAccountExists(ctx, "builder", nsName) + + By(fmt.Sprintf("waiting for deployer serviceaccount in %s", nsName)) + ExpectServiceAccountExists(ctx, "deployer", nsName) + + By("applying image-puller RoleBinding") + createImagePullerRoleBinding(rbName, nsName) + + By("creating the operator BuildConfig") + createBuildConfig(opName, nsName) + + By("creating the operator ImageStream") + createImageStream(opName, nsName) + + By("creating the operator tarball") + fileOperator := createTempTarBall(replacements, getAssetNamesBundle, getAssetBundle) + By(fmt.Sprintf("created operator tarball %q", fileOperator)) + + By("starting the operator build via RAW URL") + opArgs := []string{ + "create", + "--raw", + fmt.Sprintf( + "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", + nsName, opName, opName, nsName, + ), + "-f", + fileOperator, + } + buildOperator := startBuild(opArgs...) + + By(fmt.Sprintf("waiting for the build %q to finish", buildOperator.Name)) + waitForBuildToFinish(ctx, buildOperator.Name, nsName) + + By("creating the catalog BuildConfig") + createBuildConfig(ccName, nsName) + + By("creating the catalog ImageStream") + createImageStream(ccName, nsName) + + By("creating the catalog tarball") + fileCatalog := createTempTarBall(replacements, getAssetNamesCatalog, getAssetCatalog) + By(fmt.Sprintf("created catalog tarball %q", fileCatalog)) + + By("starting the catalog build via RAW URL") + catalogArgs := []string{ + "create", + "--raw", + fmt.Sprintf( + "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", + nsName, ccName, ccName, nsName, + ), + "-f", + fileCatalog, + } + buildCatalog := startBuild(catalogArgs...) 
+
+	By(fmt.Sprintf("waiting for the build %q to finish", buildCatalog.Name))
+	waitForBuildToFinish(ctx, buildCatalog.Name, nsName)
+
+	By("creating the ClusterCatalog")
+	createClusterCatalog(ccName, nsName)
+
+	return unique, nsName, ccName, opName
+}
+
+func createClusterCatalog(name, namespace string) {
+	ctx := context.Background()
+	k8sClient := env.Get().K8sClient
+
+	cc := &olmv1.ClusterCatalog{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: olmv1.ClusterCatalogSpec{
+			AvailabilityMode: olmv1.AvailabilityModeAvailable,
+			Priority:         0,
+			Source: olmv1.CatalogSource{
+				Type: olmv1.SourceTypeImage,
+				Image: &olmv1.ImageSource{
+					PollIntervalMinutes: ptr.To(int(600)),
+					Ref:                 fmt.Sprintf("image-registry.openshift-image-registry.svc:5000/%s/%s:latest", namespace, name),
+				},
+			},
+		},
+	}
+
+	Expect(k8sClient.Create(ctx, cc)).To(Succeed(), "failed to create ClusterCatalog")
+	DeferCleanup(func() {
+		By(fmt.Sprintf("deleting ClusterCatalog %q", name))
+		Expect(k8sClient.Delete(context.Background(), cc)).To(Succeed())
+	})
+	waitForClusterCatalogServing(ctx, cc.Name)
+}
+
+func createImagePullerRoleBinding(name, namespace string) {
+	ctx := context.Background()
+	k8sClient := env.Get().K8sClient
+
+	rb := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     "system:image-puller",
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				APIGroup: "rbac.authorization.k8s.io",
+				Kind:     "Group",
+				Name:     "system:serviceaccounts:openshift-catalogd",
+			},
+			{
+				APIGroup: "rbac.authorization.k8s.io",
+				Kind:     "Group",
+				Name:     "system:serviceaccounts:openshift-operator-controller",
+			},
+		},
+	}
+	Expect(k8sClient.Create(ctx, rb)).To(Succeed(), "failed to create image-puller RoleBinding")
+	DeferCleanup(func() {
+		By(fmt.Sprintf("deleting image-puller RoleBinding %q", name))
+		_ = k8sClient.Delete(ctx, rb)
+	})
+}
+
+func createNamespace(namespace string) {
+	ctx := context.Background()
+	k8sClient := env.Get().K8sClient
+
+	ns := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: namespace,
+		},
+	}
+
+	Expect(k8sClient.Create(ctx, ns)).To(Succeed(), "failed to create Namespace: %q", namespace)
+	DeferCleanup(func() {
+		By(fmt.Sprintf("deleting Namespace %q", namespace))
+		_ = k8sClient.Delete(context.Background(), ns)
+	})
+}
+
+func createImageStream(name, namespace string) {
+	ctx := context.Background()
+	k8sClient := env.Get().K8sClient
+
+	is := &imagev1.ImageStream{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels: map[string]string{
+				"name": name,
+			},
+		},
+	}
+
+	Expect(k8sClient.Create(ctx, is)).To(Succeed(), "failed to create ImageStream: %q", name)
+	DeferCleanup(func() {
+		By(fmt.Sprintf("deleting ImageStream %q", name))
+		_ = k8sClient.Delete(context.Background(), is)
+	})
+}
+
+func createBuildConfig(name, namespace string) {
+	ctx := context.Background()
+	k8sClient := env.Get().K8sClient
+
+	bc := &buildv1.BuildConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels: map[string]string{
+				"name": name,
+			},
+		},
+		Spec: buildv1.BuildConfigSpec{
+			CommonSpec: buildv1.CommonSpec{
+				Source: buildv1.BuildSource{
+					Type: buildv1.BuildSourceBinary,
+				},
+				Strategy: buildv1.BuildStrategy{
+					Type: buildv1.DockerBuildStrategyType,
+					DockerStrategy: &buildv1.DockerBuildStrategy{
+						ForcePull: true,
+						From: &corev1.ObjectReference{
+							Kind: "DockerImage",
+							Name: "scratch",
+						},
+						Env: []corev1.EnvVar{
+							{
+								Name:  "BUILD_LOGLEVEL",
+								Value: "5",
+							},
+						},
+					},
+				},
+				Output: buildv1.BuildOutput{
+					To: &corev1.ObjectReference{
+						Kind: "ImageStreamTag",
+						Name: name + ":latest",
+					},
+				},
+			},
+		},
+	}
+
+	Expect(k8sClient.Create(ctx, bc)).To(Succeed(), "failed to create BuildConfig: %q", name)
+	DeferCleanup(func() {
+		By(fmt.Sprintf("deleting BuildConfig %q", name))
+		_ = k8sClient.Delete(context.Background(), bc)
+	})
+}
+
+func waitForBuildToFinish(ctx SpecContext, name, namespace string) {
+	const typeBuildConditionComplete = "Complete"
+	k8sClient := env.Get().K8sClient
+	Eventually(func(g Gomega) {
+		b := &buildv1.Build{}
+		err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, b)
+		g.Expect(err).ToNot(HaveOccurred())
+
+		conditions := b.Status.Conditions
+		var cond *buildv1.BuildCondition
+		for i := range conditions {
+			if conditions[i].Type == typeBuildConditionComplete {
+				cond = &conditions[i]
+				break
+			}
+		}
+		g.Expect(cond).ToNot(BeNil())
+		g.Expect(cond.Status).To(Equal(corev1.ConditionTrue))
+	}).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed())
+
+	DeferCleanup(func() {
+		if CurrentSpecReport().Failed() {
+			RunAndPrint(context.Background(), "get", "build", name, "-n", namespace, "-oyaml")
+			RunAndPrint(context.Background(), "logs", fmt.Sprintf("build/%s", name), "-n", namespace, "--tail=200")
+		}
+	})
+}
+
+func waitForClusterCatalogServing(ctx context.Context, name string) {
+	k8sClient := env.Get().K8sClient
+	Eventually(func(g Gomega) {
+		cc := &olmv1.ClusterCatalog{}
+		err := k8sClient.Get(ctx, client.ObjectKey{Name: name}, cc)
+		g.Expect(err).ToNot(HaveOccurred())
+
+		serving := meta.FindStatusCondition(cc.Status.Conditions, olmv1.TypeServing)
+		g.Expect(serving).ToNot(BeNil())
+		g.Expect(serving.Status).To(Equal(metav1.ConditionTrue))
+	}).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed())
+}
+
+func startBuild(args ...string) *buildv1.Build {
+	output, err := RunK8sCommand(context.Background(), args...)
+ Expect(err).To(Succeed(), printExitError(err)) + + /* The output is JSON of a build.build.openshift.io resource */ + build := &buildv1.Build{} + Expect(json.Unmarshal(output, build)).To(Succeed(), "failed to unmarshal build") + return build +} + +func printExitError(err error) string { + if err == nil { + return "" + } + exiterr := &exec.ExitError{} + if errors.As(err, &exiterr) { + return fmt.Sprintf("ExitError.Stderr: %q", string(exiterr.Stderr)) + } + return err.Error() +} + +func createTempTarBall(replacements map[string]string, getAssetNames func() []string, getAsset func(string) ([]byte, error)) string { + file, err := os.CreateTemp("", "bundle-*.tar") + Expect(err).To(Succeed()) + filename := file.Name() + + namesCatalog := getAssetNames() + twCatalog := tar.NewWriter(file) + for _, name := range namesCatalog { + data, err := getAsset(name) + Expect(err).To(Succeed()) + for k, v := range replacements { + data = bytes.ReplaceAll(data, []byte(k), []byte(v)) + } + hdr := &tar.Header{ + Name: name, + Size: int64(len(data)), + Mode: 0o644, + } + err = twCatalog.WriteHeader(hdr) + Expect(err).To(Succeed()) + _, err = twCatalog.Write(data) + Expect(err).To(Succeed()) + } + Expect(twCatalog.Close()).To(Succeed(), "failed to close tar writer for file %q", filename) + Expect(file.Close()).To(Succeed(), "failed to close tar file %q", filename) + + DeferCleanup(func() { + By(fmt.Sprintf("deleting file %q", filename)) + Expect(os.Remove(filename)).To(Succeed()) + }) + return filename +} diff --git a/openshift/tests-extension/test/olmv1-incompatible.go b/openshift/tests-extension/test/olmv1-incompatible.go index 921e1b1fd..c26dbce1a 100644 --- a/openshift/tests-extension/test/olmv1-incompatible.go +++ b/openshift/tests-extension/test/olmv1-incompatible.go @@ -1,14 +1,7 @@ package test import ( - "archive/tar" - "bytes" "context" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" "time" //nolint:staticcheck // ST1001: dot-imports for readability @@ -16,21 +9,11 @@ import ( //nolint:staticcheck // ST1001: dot-imports for readability . 
"github.com/onsi/gomega" - buildv1 "github.com/openshift/api/build/v1" configv1 "github.com/openshift/api/config/v1" - imagev1 "github.com/openshift/api/image/v1" operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/origin/test/extended/util/image" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - olmv1 "github.com/operator-framework/operator-controller/api/v1" - catalogdata "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/catalog" operatordata "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/operator" "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/env" @@ -38,14 +21,22 @@ import ( ) var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation", func() { - var unique, nsName, ccName, rbName, opName string - BeforeEach(func() { - helpers.RequireOLMv1CapabilityOnOpenshift() - unique = rand.String(8) - nsName = "install-test-ns-" + unique - ccName = "install-test-cc-" + unique - rbName = "install-test-rb-" + unique - opName = "install-test-op-" + unique + var unique, nsName, ccName, opName string + BeforeEach(func(ctx SpecContext) { + testVersion := env.Get().OpenShiftVersion + replacements := map[string]string{ + "{{ TEST-BUNDLE }}": "", // Auto-filled + "{{ NAMESPACE }}": "", // Auto-filled + "{{ VERSION }}": testVersion, + + // Using the shell image provided by origin as the controller image. + // The image is mirrored into disconnected environments for testing. + "{{ TEST-CONTROLLER }}": image.ShellImage(), + } + unique, nsName, ccName, opName = helpers.NewCatalogAndClusterBundles(ctx, replacements, + catalogdata.AssetNames, catalogdata.Asset, + operatordata.AssetNames, operatordata.Asset, + ) }) AfterEach(func(ctx SpecContext) { @@ -61,97 +52,6 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation Skip("Requires OCP APIs: not OpenShift") } - By(fmt.Sprintf("setting a unique value: %q", unique)) - - testVersion := env.Get().OpenShiftVersion - replacements := map[string]string{ - "{{ TEST-BUNDLE }}": opName, - "{{ NAMESPACE }}": nsName, - "{{ VERSION }}": testVersion, - - // Using the shell image provided by origin as the controller image. - // The image is mirrored into disconnected environments for testing. - "{{ TEST-CONTROLLER }}": image.ShellImage(), - } - By(fmt.Sprintf("testing against OCP %s", testVersion)) - - By("creating a new Namespace") - nsCleanup := createNamespace(nsName) - DeferCleanup(nsCleanup) - - // The builder (and deployer) service accounts are created by OpenShift itself which injects them in the NS. 
- By(fmt.Sprintf("waiting for builder serviceaccount in %s", nsName)) - helpers.ExpectServiceAccountExists(ctx, "builder", nsName) - - By(fmt.Sprintf("waiting for deployer serviceaccount in %s", nsName)) - helpers.ExpectServiceAccountExists(ctx, "deployer", nsName) - - By("applying image-puller RoleBinding") - rbCleanup := createImagePullerRoleBinding(rbName, nsName) - DeferCleanup(rbCleanup) - - By("creating the operator BuildConfig") - bcCleanup := createBuildConfig(opName, nsName) - DeferCleanup(bcCleanup) - - By("creating the operator ImageStream") - isCleanup := createImageStream(opName, nsName) - DeferCleanup(isCleanup) - - By("creating the operator tarball") - fileOperator, fileCleanup := createTempTarBall(replacements, operatordata.AssetNames, operatordata.Asset) - DeferCleanup(fileCleanup) - By(fmt.Sprintf("created operator tarball %q", fileOperator)) - - By("starting the operator build via RAW URL") - opArgs := []string{ - "create", - "--raw", - fmt.Sprintf( - "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", - nsName, opName, opName, nsName, - ), - "-f", - fileOperator, - } - buildOperator := startBuild(opArgs...) - - By(fmt.Sprintf("waiting for the build %q to finish", buildOperator.Name)) - waitForBuildToFinish(ctx, buildOperator.Name, nsName) - - By("creating the catalog BuildConfig") - bcCleanup = createBuildConfig(ccName, nsName) - DeferCleanup(bcCleanup) - - By("creating the catalog ImageStream") - isCleanup = createImageStream(ccName, nsName) - DeferCleanup(isCleanup) - - By("creating the catalog tarball") - fileCatalog, fileCleanup := createTempTarBall(replacements, catalogdata.AssetNames, catalogdata.Asset) - DeferCleanup(fileCleanup) - By(fmt.Sprintf("created catalog tarball %q", fileCatalog)) - - By("starting the catalog build via RAW URL") - catalogArgs := []string{ - "create", - "--raw", - fmt.Sprintf( - "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", - nsName, ccName, ccName, nsName, - ), - "-f", - fileCatalog, - } - buildCatalog := startBuild(catalogArgs...) 
- - By(fmt.Sprintf("waiting for the build %q to finish", buildCatalog.Name)) - waitForBuildToFinish(ctx, buildCatalog.Name, nsName) - - By("creating the ClusterCatalog") - ccCleanup := createClusterCatalog(ccName, nsName) - DeferCleanup(ccCleanup) - By("waiting for InstalledOLMOperatorUpgradable to be true") waitForOlmUpgradeStatus(ctx, operatorv1.ConditionTrue, "") @@ -168,200 +68,6 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation }) }) -func createClusterCatalog(name, namespace string) func() { - ctx := context.Background() - k8sClient := env.Get().K8sClient - - cc := &olmv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: olmv1.ClusterCatalogSpec{ - AvailabilityMode: olmv1.AvailabilityModeAvailable, - Priority: 0, - Source: olmv1.CatalogSource{ - Type: olmv1.SourceTypeImage, - Image: &olmv1.ImageSource{ - PollIntervalMinutes: ptr.To(int(600)), - Ref: fmt.Sprintf("image-registry.openshift-image-registry.svc:5000/%s/%s:latest", namespace, name), - }, - }, - }, - } - - Expect(k8sClient.Create(ctx, cc)).To(Succeed(), "failed to create ClusterCatalog") - waitForClusterCatalogServing(ctx, cc.Name) - return func() { - By(fmt.Sprintf("deleting ClusterCatalog %q", name)) - Expect(k8sClient.Delete(context.Background(), cc)).To(Succeed()) - } -} - -func createImagePullerRoleBinding(name, namespace string) func() { - ctx := context.Background() - k8sClient := env.Get().K8sClient - - rb := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "system:image-puller", - }, - Subjects: []rbacv1.Subject{ - { - APIGroup: "rbac.authorization.k8s.io", - Kind: "Group", - Name: "system:serviceaccounts:openshift-catalogd", - }, - { - APIGroup: "rbac.authorization.k8s.io", - Kind: "Group", - Name: "system:serviceaccounts:openshift-operator-controller", - }, - }, - } - Expect(k8sClient.Create(ctx, rb)).To(Succeed(), "failed to create image-puller RoleBinding") - return func() { - By(fmt.Sprintf("deleting image-puller RoleBinding %q", name)) - _ = k8sClient.Delete(ctx, rb) - } -} - -func createNamespace(namespace string) func() { - ctx := context.Background() - k8sClient := env.Get().K8sClient - - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - } - - Expect(k8sClient.Create(ctx, ns)).To(Succeed(), "failed to create Namespace: %q", namespace) - return func() { - By(fmt.Sprintf("deleting Namespace %q", namespace)) - _ = k8sClient.Delete(context.Background(), ns) - } -} - -func createImageStream(name, namespace string) func() { - ctx := context.Background() - k8sClient := env.Get().K8sClient - - is := &imagev1.ImageStream{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "name": name, - }, - }, - } - - Expect(k8sClient.Create(ctx, is)).To(Succeed(), "failed to create ImageStream: %q", name) - return func() { - By(fmt.Sprintf("deleting ImageStream %q", name)) - _ = k8sClient.Delete(context.Background(), is) - } -} - -func createBuildConfig(name, namespace string) func() { - ctx := context.Background() - k8sClient := env.Get().K8sClient - - bc := &buildv1.BuildConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "name": name, - }, - }, - Spec: buildv1.BuildConfigSpec{ - CommonSpec: buildv1.CommonSpec{ - Source: buildv1.BuildSource{ - Type: buildv1.BuildSourceBinary, - }, - 
Strategy: buildv1.BuildStrategy{ - Type: buildv1.DockerBuildStrategyType, - DockerStrategy: &buildv1.DockerBuildStrategy{ - ForcePull: true, - From: &corev1.ObjectReference{ - Kind: "DockerImage", - Name: "scratch", - }, - Env: []corev1.EnvVar{ - { - Name: "BUILD_LOGLEVEL", - Value: "5", - }, - }, - }, - }, - Output: buildv1.BuildOutput{ - To: &corev1.ObjectReference{ - Kind: "ImageStreamTag", - Name: name + ":latest", - }, - }, - }, - }, - } - - Expect(k8sClient.Create(ctx, bc)).To(Succeed(), "failed to create BuildConfig: %q", name) - return func() { - By(fmt.Sprintf("deleting BuildConfig %q", name)) - _ = k8sClient.Delete(context.Background(), bc) - } -} - -func waitForBuildToFinish(ctx SpecContext, name, namespace string) { - const typeBuildConditionComplete = "Complete" - k8sClient := env.Get().K8sClient - Eventually(func(g Gomega) { - b := &buildv1.Build{} - err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, b) - g.Expect(err).ToNot(HaveOccurred()) - - conditions := b.Status.Conditions - var cond *buildv1.BuildCondition - for i := range conditions { - if conditions[i].Type == typeBuildConditionComplete { - cond = &conditions[i] - break - } - } - g.Expect(cond).ToNot(BeNil()) - g.Expect(cond.Status).To(Equal(corev1.ConditionTrue)) - }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) - - DeferCleanup(func() { - if CurrentSpecReport().Failed() { - if CurrentSpecReport().Failed() { - helpers.RunAndPrint(context.Background(), "get", "build", name, "-n", namespace, "-oyaml") - helpers.RunAndPrint(context.Background(), "logs", fmt.Sprintf("build/%s", name), "-n", namespace, "--tail=200") - } - } - }) -} - -func waitForClusterCatalogServing(ctx context.Context, name string) { - k8sClient := env.Get().K8sClient - Eventually(func(g Gomega) { - cc := &olmv1.ClusterCatalog{} - err := k8sClient.Get(ctx, client.ObjectKey{Name: name}, cc) - g.Expect(err).ToNot(HaveOccurred()) - - serving := meta.FindStatusCondition(cc.Status.Conditions, olmv1.TypeServing) - g.Expect(serving).ToNot(BeNil()) - g.Expect(serving.Status).To(Equal(metav1.ConditionTrue)) - }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) -} - func waitForOlmUpgradeStatus(ctx SpecContext, status operatorv1.ConditionStatus, name string) { const reasonIncompatibleOperatorsInstalled = "IncompatibleOperatorsInstalled" const typeInstalledOLMOperatorsUpgradeable = "InstalledOLMOperatorsUpgradeable" @@ -412,56 +118,3 @@ func waitForClusterOperatorUpgradable(ctx SpecContext, name string) { g.Expect(cond.Message).To(ContainSubstring(name)) }).WithTimeout(5 * time.Minute).WithPolling(1 * time.Second).Should(Succeed()) } - -func startBuild(args ...string) *buildv1.Build { - output, err := helpers.RunK8sCommand(context.Background(), args...) 
- Expect(err).To(Succeed(), printExitError(err)) - - /* The output is JSON of a build.build.openshift.io resource */ - build := &buildv1.Build{} - Expect(json.Unmarshal(output, build)).To(Succeed(), "failed to unmarshal build") - return build -} - -func printExitError(err error) string { - if err == nil { - return "" - } - exiterr := &exec.ExitError{} - if errors.As(err, &exiterr) { - return fmt.Sprintf("ExitError.Stderr: %q", string(exiterr.Stderr)) - } - return err.Error() -} - -func createTempTarBall(replacements map[string]string, getAssetNames func() []string, getAsset func(string) ([]byte, error)) (string, func()) { - file, err := os.CreateTemp("", "bundle-*.tar") - Expect(err).To(Succeed()) - filename := file.Name() - - namesCatalog := getAssetNames() - twCatalog := tar.NewWriter(file) - for _, name := range namesCatalog { - data, err := getAsset(name) - Expect(err).To(Succeed()) - for k, v := range replacements { - data = bytes.ReplaceAll(data, []byte(k), []byte(v)) - } - hdr := &tar.Header{ - Name: name, - Size: int64(len(data)), - Mode: 0o644, - } - err = twCatalog.WriteHeader(hdr) - Expect(err).To(Succeed()) - _, err = twCatalog.Write(data) - Expect(err).To(Succeed()) - } - Expect(twCatalog.Close()).To(Succeed(), "failed to close tar writer for file %q", filename) - Expect(file.Close()).To(Succeed(), "failed to close tar file %q", filename) - - return filename, func() { - By(fmt.Sprintf("deleting file %q", filename)) - Expect(os.Remove(filename)).To(Succeed()) - } -} diff --git a/openshift/tests-extension/test/olmv1.go b/openshift/tests-extension/test/olmv1.go index d5e0ae818..77f1f6ee5 100644 --- a/openshift/tests-extension/test/olmv1.go +++ b/openshift/tests-extension/test/olmv1.go @@ -125,104 +125,22 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 }) var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation", func() { - var unique, nsName, ccName, rbName, opName string + var unique, nsName, ccName, opName string BeforeEach(func(ctx SpecContext) { - helpers.RequireOLMv1CapabilityOnOpenshift() - unique = rand.String(8) - nsName = "install-test-ns-" + unique - ccName = "install-test-cc-" + unique - rbName = "install-test-rb-" + unique - opName = "install-test-op-" + unique - - By(fmt.Sprintf("setting a unique value: %q", unique)) - testVersion := env.Get().OpenShiftVersion replacements := map[string]string{ - "{{ TEST-BUNDLE }}": opName, - "{{ NAMESPACE }}": nsName, + "{{ TEST-BUNDLE }}": "", // Auto-filled + "{{ NAMESPACE }}": "", // Auto-filled "{{ VERSION }}": testVersion, // Using the shell image provided by origin as the controller image. // The image is mirrored into disconnected environments for testing. "{{ TEST-CONTROLLER }}": image.ShellImage(), } - - By("creating a new Namespace") - nsCleanup := createNamespace(nsName) - DeferCleanup(nsCleanup) - - // The builder (and deployer) service accounts are created by OpenShift itself which injects them in the NS. 
- By(fmt.Sprintf("waiting for builder serviceaccount in %s", nsName)) - helpers.ExpectServiceAccountExists(ctx, "builder", nsName) - - By(fmt.Sprintf("waiting for deployer serviceaccount in %s", nsName)) - helpers.ExpectServiceAccountExists(ctx, "deployer", nsName) - - By("applying image-puller RoleBinding") - rbCleanup := createImagePullerRoleBinding(rbName, nsName) - DeferCleanup(rbCleanup) - - By("creating the operator BuildConfig") - bcCleanup := createBuildConfig(opName, nsName) - DeferCleanup(bcCleanup) - - By("creating the operator ImageStream") - isCleanup := createImageStream(opName, nsName) - DeferCleanup(isCleanup) - - By("creating the operator tarball") - fileOperator, fileCleanup := createTempTarBall(replacements, operatordata.AssetNames, operatordata.Asset) - DeferCleanup(fileCleanup) - By(fmt.Sprintf("created operator tarball %q", fileOperator)) - - By("starting the operator build via RAW URL") - opArgs := []string{ - "create", - "--raw", - fmt.Sprintf( - "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", - nsName, opName, opName, nsName, - ), - "-f", - fileOperator, - } - buildOperator := startBuild(opArgs...) - - By(fmt.Sprintf("waiting for the build %q to finish", buildOperator.Name)) - waitForBuildToFinish(ctx, buildOperator.Name, nsName) - - By("creating the catalog BuildConfig") - bcCleanup = createBuildConfig(ccName, nsName) - DeferCleanup(bcCleanup) - - By("creating the catalog ImageStream") - isCleanup = createImageStream(ccName, nsName) - DeferCleanup(isCleanup) - - By("creating the catalog tarball") - fileCatalog, fileCleanup := createTempTarBall(replacements, catalogdata.AssetNames, catalogdata.Asset) - DeferCleanup(fileCleanup) - By(fmt.Sprintf("created catalog tarball %q", fileCatalog)) - - By("starting the catalog build via RAW URL") - catalogArgs := []string{ - "create", - "--raw", - fmt.Sprintf( - "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", - nsName, ccName, ccName, nsName, - ), - "-f", - fileCatalog, - } - buildCatalog := startBuild(catalogArgs...) 
- - By(fmt.Sprintf("waiting for the build %q to finish", buildCatalog.Name)) - waitForBuildToFinish(ctx, buildCatalog.Name, nsName) - - By("creating the ClusterCatalog") - ccCleanup := createClusterCatalog(ccName, nsName) - DeferCleanup(ccCleanup) + unique, nsName, ccName, opName = helpers.NewCatalogAndClusterBundles(ctx, replacements, + catalogdata.AssetNames, catalogdata.Asset, + operatordata.AssetNames, operatordata.Asset, + ) }) AfterEach(func(ctx SpecContext) { diff --git a/openshift/tests-extension/test/webhooks.go b/openshift/tests-extension/test/webhooks.go index 6fa8211dc..ef7e2cf28 100644 --- a/openshift/tests-extension/test/webhooks.go +++ b/openshift/tests-extension/test/webhooks.go @@ -45,7 +45,7 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLMWebhookProviderOpenshiftServi k8sClient client.Client dynamicClient dynamic.Interface webhookOperatorInstallNamespace string - unique string + catalogName string ) BeforeEach(func(ctx SpecContext) { @@ -65,102 +65,21 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLMWebhookProviderOpenshiftServi By("ensuring no ClusterExtension and CRD from a previous run") helpers.EnsureCleanupClusterExtension(ctx, webhookOperatorPackageName, webhookOperatorCRDName) - unique = rand.String(8) - nsName := "webhook-olm-ns-" + unique - rbName := "webhook-olm-rb-" + unique - catalogName := webhookCatalogName + "-" + unique - bundleName := webhookOperatorPackageName + // Build webhook operator bundle and catalog using the consolidated helper + // Note: {{ TEST-BUNDLE }} and {{ NAMESPACE }} will be auto-filled replacements := map[string]string{ - "{{ TEST-BUNDLE }}": bundleName, - "{{ NAMESPACE }}": nsName, + "{{ TEST-BUNDLE }}": "", // Auto-filled + "{{ NAMESPACE }}": "", // Auto-filled "{{ TEST-CONTROLLER }}": image.LocationFor("quay.io/olmtest/webhook-operator:v0.0.5"), } - // Create namespace for building images - By("creating a new Namespace for builds") - nsCleanup := createNamespace(nsName) - DeferCleanup(nsCleanup) - - By(fmt.Sprintf("waiting for builder serviceaccount in %s", nsName)) - helpers.ExpectServiceAccountExists(ctx, "builder", nsName) - - By(fmt.Sprintf("waiting for deployer serviceaccount in %s", nsName)) - helpers.ExpectServiceAccountExists(ctx, "deployer", nsName) - - By("applying image-puller RoleBinding") - rbCleanup := createImagePullerRoleBinding(rbName, nsName) - DeferCleanup(rbCleanup) - - // Build bundle image - By("creating the operator bundle BuildConfig") - bcBundleCleanup := createBuildConfig(bundleName, nsName) - DeferCleanup(bcBundleCleanup) - - By("creating the operator bundle ImageStream") - isBundleCleanup := createImageStream(bundleName, nsName) - DeferCleanup(isBundleCleanup) - - By("creating the operator bundle tarball") - fileOperatorBundle, fileCleanupBundle := createTempTarBall(replacements, webhookbundle.AssetNames, webhookbundle.Asset) - DeferCleanup(fileCleanupBundle) - By(fmt.Sprintf("created operator bundle tarball %q", fileOperatorBundle)) - - By("starting the operator build via RAW URL") - opArgs := []string{ - "create", - "--raw", - fmt.Sprintf( - "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", - nsName, bundleName, bundleName, nsName, - ), - "-f", - fileOperatorBundle, - } - buildOperatorBundle := startBuild(opArgs...) 
- - By(fmt.Sprintf("waiting for the build %q to finish", buildOperatorBundle.Name)) - waitForBuildToFinish(ctx, buildOperatorBundle.Name, nsName) - - // Build index image - By("creating the catalog BuildConfig") - bcIndexCleanup := createBuildConfig(catalogName, nsName) - DeferCleanup(bcIndexCleanup) - - By("creating the catalog ImageStream") - isIndexCleanup := createImageStream(catalogName, nsName) - DeferCleanup(isIndexCleanup) - - By("creating the catalog tarball") - fileCatalogIndex, fileCleanupIndex := createTempTarBall(replacements, webhookindex.AssetNames, webhookindex.Asset) - DeferCleanup(fileCleanupIndex) - By(fmt.Sprintf("created catalog tarball %q", fileCatalogIndex)) - - By("starting the catalog build via RAW URL") - indexArgs := []string{ - "create", - "--raw", - fmt.Sprintf( - "/apis/build.openshift.io/v1/namespaces/%s/buildconfigs/%s/instantiatebinary?name=%s&namespace=%s", - nsName, catalogName, catalogName, nsName, - ), - "-f", - fileCatalogIndex, - } - buildCatalogIndex := startBuild(indexArgs...) - - By(fmt.Sprintf("waiting for the build %q to finish", buildCatalogIndex.Name)) - waitForBuildToFinish(ctx, buildCatalogIndex.Name, nsName) - - // Create ClusterCatalog - By("creating the ClusterCatalog") - catalogCleanup := createClusterCatalog(catalogName, nsName) - DeferCleanup(func(ctx context.Context) { - catalogCleanup() - }) - - By("waiting for the webhook-operator catalog to be serving") - helpers.ExpectCatalogToBeServing(ctx, catalogName) + var nsName, opName string + _, nsName, catalogName, opName = helpers.NewCatalogAndClusterBundles(ctx, replacements, + webhookindex.AssetNames, webhookindex.Asset, + webhookbundle.AssetNames, webhookbundle.Asset, + ) + By(fmt.Sprintf("webhook bundle %q and catalog %q built successfully in namespace %q", opName, catalogName, nsName)) // Create ClusterExtension in a separate namespace // setupWebhookOperator now registers its own DeferCleanup handlers internally
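
Illustrative usage sketch (not part of the patch): a minimal suite consuming the consolidated helper, mirroring the BeforeEach blocks in olmv1.go and olmv1-incompatible.go above. The Describe/It names are invented for illustration; the packages, signatures, and return order come from the diff.

package test

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"

	"github.com/openshift/origin/test/extended/util/image"

	catalogdata "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/catalog"
	operatordata "github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/bindata/operator"
	"github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/env"
	"github.com/openshift/operator-framework-operator-controller/openshift/tests-extension/pkg/helpers"
)

var _ = Describe("[sig-olmv1] helper usage sketch", func() {
	var unique, nsName, ccName, opName string

	BeforeEach(func(ctx SpecContext) {
		// Empty values ask the helper to auto-fill the generated namespace and
		// bundle/operator names; the controller image and version are supplied
		// by the caller, as in olmv1.go.
		replacements := map[string]string{
			"{{ NAMESPACE }}":       "",
			"{{ TEST-BUNDLE }}":     "",
			"{{ VERSION }}":         env.Get().OpenShiftVersion,
			"{{ TEST-CONTROLLER }}": image.ShellImage(),
		}
		// Returned in order: unique suffix, build namespace, ClusterCatalog name,
		// and operator/bundle name. Cleanup is registered via DeferCleanup inside
		// the helper, so no explicit teardown is needed here.
		unique, nsName, ccName, opName = helpers.NewCatalogAndClusterBundles(ctx, replacements,
			catalogdata.AssetNames, catalogdata.Asset,
			operatordata.AssetNames, operatordata.Asset,
		)
	})

	It("builds a serving catalog to install from", func(ctx SpecContext) {
		By(fmt.Sprintf("catalog %q (suffix %q) serving bundle %q built in namespace %q", ccName, unique, opName, nsName))
	})
})

Keeping the Namespace/BuildConfig/ImageStream/ClusterCatalog plumbing and its DeferCleanup handlers inside helpers.NewCatalogAndClusterBundles is what lets each suite in this patch shrink its BeforeEach to a replacements map plus a single call.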