From 84813a357074942edb243c421862078a61e8f2d8 Mon Sep 17 00:00:00 2001 From: Predrag Knezevic Date: Thu, 30 Oct 2025 16:32:15 +0100 Subject: [PATCH 1/2] Migrate e2e tests to Godog BDD framework Replace traditional Go e2e tests with Godog (Cucumber for Go) to improve test readability and maintainability through behavior-driven development. Changes: - Convert existing test scenarios to Gherkin feature files - Implement reusable step definitions in steps/steps.go - Add scenario hooks for setup/teardown and feature gate detection - Provide comprehensive documentation in test/e2e/README.md - Remove legacy test files (cluster_extension_install_test.go, etc.) Benefits: - Human-readable test scenarios serve as living documentation - Better separation between test specification and implementation - Easier collaboration between technical and non-technical stakeholders - Reduced code duplication through reusable step definitions Assisted-By: Claude " --- go.mod | 12 +- go.sum | 29 + test/e2e/README.md | 333 ++++++++ test/e2e/cluster_extension_install_test.go | 798 ------------------ test/e2e/e2e_suite_test.go | 74 -- test/e2e/features/install.feature | 272 ++++++ test/e2e/features/metrics.feature | 14 + test/e2e/features/recover.feature | 117 +++ test/e2e/features/update.feature | 247 ++++++ test/e2e/features_test.go | 81 ++ test/e2e/metrics_test.go | 253 ------ test/e2e/network_policy_test.go | 34 + test/e2e/single_namespace_support_test.go | 412 --------- test/e2e/steps/hooks.go | 122 +++ test/e2e/steps/steps.go | 531 ++++++++++++ .../testdata/cluster-admin-rbac-template.yaml | 24 + test/e2e/steps/testdata/extra-catalog.yaml | 11 + .../metrics-reader-rbac-template.yaml | 24 + test/e2e/steps/testdata/rbac-template.yaml | 77 ++ test/e2e/steps/testdata/test-catalog.yaml | 11 + test/e2e/webhook_support_test.go | 237 ------ 21 files changed, 1938 insertions(+), 1775 deletions(-) create mode 100644 test/e2e/README.md delete mode 100644 test/e2e/cluster_extension_install_test.go delete mode 100644 test/e2e/e2e_suite_test.go create mode 100644 test/e2e/features/install.feature create mode 100644 test/e2e/features/metrics.feature create mode 100644 test/e2e/features/recover.feature create mode 100644 test/e2e/features/update.feature create mode 100644 test/e2e/features_test.go delete mode 100644 test/e2e/metrics_test.go delete mode 100644 test/e2e/single_namespace_support_test.go create mode 100644 test/e2e/steps/hooks.go create mode 100644 test/e2e/steps/steps.go create mode 100644 test/e2e/steps/testdata/cluster-admin-rbac-template.yaml create mode 100644 test/e2e/steps/testdata/extra-catalog.yaml create mode 100644 test/e2e/steps/testdata/metrics-reader-rbac-template.yaml create mode 100644 test/e2e/steps/testdata/rbac-template.yaml create mode 100644 test/e2e/steps/testdata/test-catalog.yaml delete mode 100644 test/e2e/webhook_support_test.go diff --git a/go.mod b/go.mod index b94dd33696..fd6cb16d99 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,8 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/cert-manager/cert-manager v1.18.2 github.com/containerd/containerd v1.7.29 + github.com/cucumber/godog v0.15.1 + github.com/evanphx/json-patch v5.9.11+incompatible github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.3 github.com/golang-jwt/jwt/v5 v5.3.0 @@ -86,6 +88,8 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect + github.com/cucumber/gherkin/go/v26 
v26.2.0 // indirect + github.com/cucumber/messages/go/v21 v21.0.1 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/cyphar/filepath-securejoin v0.6.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -97,7 +101,6 @@ require ( github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect - github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.18.0 // indirect @@ -110,6 +113,7 @@ require ( github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.22.0 // indirect github.com/go-openapi/jsonreference v0.21.1 // indirect github.com/go-openapi/swag v0.24.1 // indirect @@ -126,6 +130,7 @@ require ( github.com/go-openapi/swag/yamlutils v0.24.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/uuid v4.3.1+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -142,7 +147,10 @@ require ( github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-memdb v1.3.4 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -223,6 +231,8 @@ require ( go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.podman.io/common v0.66.0 // indirect go.podman.io/storage v1.61.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.45.0 // indirect diff --git a/go.sum b/go.sum index 1db7e05168..94698c1f34 100644 --- a/go.sum +++ b/go.sum @@ -85,9 +85,17 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cucumber/gherkin/go/v26 v26.2.0 h1:EgIjePLWiPeslwIWmNQ3XHcypPsWAHoMCz/YEBKP4GI= +github.com/cucumber/gherkin/go/v26 v26.2.0/go.mod h1:t2GAPnB8maCT4lkHL99BDCVNzCh1d7dBhCLt150Nr/0= +github.com/cucumber/godog v0.15.1 h1:rb/6oHDdvVZKS66hrhpjFQFHjthFSrQBCOI1LwshNTI= 
+github.com/cucumber/godog v0.15.1/go.mod h1:qju+SQDewOljHuq9NSM66s0xEhogx0q30flfxL4WUk8= +github.com/cucumber/messages/go/v21 v21.0.1 h1:wzA0LxwjlWQYZd32VTlAVDTkW6inOFmSM+RuOwHZiMI= +github.com/cucumber/messages/go/v21 v21.0.1/go.mod h1:zheH/2HS9JLVFukdrsPWoPdmUtmYQAQPLk7w5vWsk5s= +github.com/cucumber/messages/go/v22 v22.0.0/go.mod h1:aZipXTKc0JnjCsXrJnuZpWhtay93k7Rn3Dee7iyPJjs= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= @@ -202,6 +210,9 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= @@ -274,8 +285,19 @@ github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= +github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -304,8 +326,11 @@ github.com/klauspost/compress v1.18.1 
h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3J github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -457,8 +482,11 @@ github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -478,6 +506,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 0000000000..cc93c5577b --- /dev/null +++ b/test/e2e/README.md @@ -0,0 +1,333 @@ +# E2E Tests - Godog Framework + +This directory contains end-to-end (e2e) tests, written using the [Godog](https://github.com/cucumber/godog) framework. + +## Overview + +### What is Godog/BDD/Cucumber? + +Godog is a Behavior-Driven Development (BDD) framework that allows you to write tests in a human-readable format called +[Gherkin](https://cucumber.io/docs/gherkin/reference/). Tests are written as scenarios using Given-When-Then syntax, making them accessible to both technical and +non-technical stakeholders. 
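+
+A minimal, abridged illustration of the syntax (a fuller, real example appears under "Feature Files" below):
+
+```gherkin
+Scenario: Install an extension
+  Given OLM is available
+  When ClusterExtension is applied
+  Then ClusterExtension is available
+```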
+ +**Benefits:** + +- **Readable**: Tests serve as living documentation +- **Maintainable**: Reusable step definitions reduce code duplication +- **Collaborative**: Product owners and developers share the same test specifications +- **Structured**: Clear separation between test scenarios and implementation + +## Project Structure + +``` +test/e2e/ +├── README.md # This file +├── features_test.go # Test runner and suite initialization +├── features/ # Gherkin feature files +│ ├── install.feature # ClusterExtension installation scenarios +│ ├── update.feature # ClusterExtension update scenarios +│ ├── recover.feature # Recovery scenarios +│ └── metrics.feature # Metrics endpoint scenarios +└── steps/ # Step definitions and test utilities + ├── steps.go # Step definition implementations + ├── hooks.go # Test hooks and scenario context + └── testdata/ # Test data (RBAC templates, catalogs) + ├── rbac-template.yaml + ├── cluster-admin-rbac-template.yaml + ├── test-catalog.yaml + └── ... +``` + +## Architecture + +### 1. Test Runner (`features_test.go`) + +The main test entry point that configures and runs the Godog test suite. + +### 2. Feature Files (`features/*.feature`) + +Gherkin files that describe test scenarios in natural language. + +**Structure:** + +```gherkin +Feature: [Feature Name] + [Feature description] + + Background: + [Common setup steps for all scenarios] + + Scenario: [Scenario Name] + Given [precondition] + When [action] + Then [expected result] + And [additional assertions] +``` + +**Example:** + +```gherkin +Feature: Install ClusterExtension + + Background: + Given OLM is available + And "test" catalog serves bundles + And Service account "olm-sa" with needed permissions is available in test namespace + + Scenario: Install latest available version from the default channel + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + ... + """ + Then ClusterExtension is rolled out + And ClusterExtension is available +``` + +### 3. Step Definitions (`steps/steps.go`) + +Go functions that implement the steps defined in feature files. Each step is registered with a regex pattern that +matches the Gherkin text. + +**Registration:** + +```go +func RegisterSteps(sc *godog.ScenarioContext) { +sc.Step(`^OLM is available$`, OLMisAvailable) +sc.Step(`^bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled) +sc.Step(`^ClusterExtension is applied$`, ResourceIsApplied) +// ... more steps +} +``` + +**Step Implementation Pattern:** + +```go +func BundleInstalled(ctx context.Context, name, version string) error { + sc := scenarioCtx(ctx) + waitFor(ctx, func () bool { + v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}") + if err != nil { + return false + } + var bundle map[string]interface{} + json.Unmarshal([]byte(v), &bundle) + return bundle["name"] == name && bundle["version"] == version + }) + return nil +} +``` + +### 4. Hooks and Context (`steps/hooks.go`) + +Manages test lifecycle and scenario-specific context. 
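+
+A minimal sketch of how such hooks are wired into Godog (the function name and comments are illustrative assumptions, not the actual code in `steps/hooks.go`; the hooks used by this suite are listed below):
+
+```go
+import (
+	"context"
+
+	"github.com/cucumber/godog"
+)
+
+// RegisterHooks is a hypothetical example of attaching per-scenario hooks.
+func RegisterHooks(sc *godog.ScenarioContext) {
+	sc.Before(func(ctx context.Context, s *godog.Scenario) (context.Context, error) {
+		// e.g. create a scenario-specific namespace and stash it in the context
+		return ctx, nil
+	})
+	sc.After(func(ctx context.Context, s *godog.Scenario, err error) (context.Context, error) {
+		// e.g. tear down the namespace and any resources the scenario created
+		return ctx, nil
+	})
+}
+```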
+ +**Hooks:** + +- `CheckFeatureTags`: Skips scenarios based on feature gate tags (e.g., `@WebhookProviderCertManager`) +- `CreateScenarioContext`: Creates unique namespace and names for each scenario +- `ScenarioCleanup`: Cleans up resources after each scenario + +**Variable Substitution:** + +Replaces `$TEST_NAMESPACE` and `$NAME` with scenario-specific values. + +## Writing Tests + +### 1. Create a Feature File + +Create a new `.feature` file in `test/e2e/features/`: + +```gherkin +Feature: Your Feature Name + Description of what this feature tests + + Background: + Given OLM is available + And "test" catalog serves bundles + + Scenario: Your scenario description + When [some action] + Then [expected outcome] +``` + +### 2. Implement Step Definitions + +Add step implementations in `steps/steps.go`: + +```go +func RegisterSteps(sc *godog.ScenarioContext) { + // ... existing steps + sc.Step(`^your step pattern "([^"]+)"$`, YourStepFunction) +} + +func YourStepFunction(ctx context.Context, param string) error { + sc := scenarioCtx(ctx) + // Implementation + return nil +} +``` + +### 3. Use Existing Steps + +Leverage existing steps for common operations: + +- **Setup**: `Given OLM is available`, `And "test" catalog serves bundles` +- **Resource Management**: `When ClusterExtension is applied`, `And resource is applied` +- **Assertions**: `Then ClusterExtension is available`, `And bundle "..." is installed` +- **Conditions**: `Then ClusterExtension reports Progressing as True with Reason Retrying:` + +### 4. Variable Substitution + +Use these variables in YAML templates: + +- `$NAME`: Scenario-specific ClusterExtension name (e.g., `ce-123`) +- `$TEST_NAMESPACE`: Scenario-specific namespace (e.g., `ns-123`) + +### 5. Feature Tags + +Use tags to conditionally run scenarios based on feature gates: + +```gherkin +@WebhookProviderCertManager +Scenario: Install operator having webhooks +``` + +Scenarios are skipped if the feature gate is not enabled on the deployed controller. + +## Running Tests + +### Run All Tests + +```bash +make test-e2e +``` + +or + +```bash +make test-experimental-e2e +``` + + +### Run Specific Feature + +```bash +go test test/e2e/features_test.go -- features/install.feature +``` + +### Run Specific Scenario by Tag + +```bash +go test test/e2e/features_test.go --godog.tags="@WebhookProviderCertManager" +``` + +### Run with Debug Logging + +```bash +go test -v test/e2e/features_test.go --log.debug +``` + +### CLI Options + +Godog options can be passed after `--`: + +```bash +go test test/e2e/features_test.go \ + --godog.format=pretty \ + --godog.tags="@WebhookProviderCertManager" +``` + +Available formats: `pretty`, `cucumber`, `progress`, `junit` + +### Environment Variables + +- `KUBECONFIG`: Path to kubeconfig file (defaults to `~/.kube/config`) +- `E2E_SUMMARY_OUTPUT`: Path to write test summary (optional) +- `LOCAL_REGISTRY_HOST`: Local registry for catalog images + +## Design Patterns + +### 1. Scenario Isolation + +Each scenario runs in its own namespace with unique resource names, ensuring complete isolation: + +- Namespace: `ns-{scenario-id}` +- ClusterExtension: `ce-{scenario-id}` + +### 2. Automatic Cleanup + +The `ScenarioCleanup` hook ensures all resources are deleted after each scenario: + +- Kills background processes (e.g., kubectl port-forward) +- Deletes ClusterExtensions +- Deletes namespaces +- Deletes added resources + +### 3. 
Declarative Resource Management + +Resources are managed declaratively using YAML templates embedded in feature files as docstrings: + +```gherkin +When ClusterExtension is applied +""" + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + ... + """ +``` + +### 4. Polling with Timeouts + +All asynchronous operations use `waitFor` with consistent timeout (300s) and tick (1s): + +```go +waitFor(ctx, func () bool { + // Check condition + return conditionMet +}) +``` + +### 5. Feature Gate Detection + +Tests automatically detect enabled feature gates from the running controller and skip scenarios that require disabled +features. + +## Common Step Patterns + +A list of available, implemented steps can be obtained by running: + +```shell +go test test/e2e/features_test.go -d +``` + +## Best Practices + +1. **Keep scenarios focused**: Each scenario should test one specific behavior +2. **Use Background wisely**: Common setup steps belong in Background +3. **Reuse steps**: Leverage existing step definitions before creating new ones +4. **Meaningful names**: Scenario names should clearly describe what is being tested +5. **Avoid implementation details**: Focus on behavior, not implementation + +## References + +- [Godog Documentation](https://github.com/cucumber/godog) +- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/) +- [Cucumber Best Practices](https://cucumber.io/docs/guides/10-minute-tutorial/) diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go deleted file mode 100644 index b3380ff0f5..0000000000 --- a/test/e2e/cluster_extension_install_test.go +++ /dev/null @@ -1,798 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "slices" - "testing" - "time" - - "github.com/google/go-containerregistry/pkg/crane" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -const ( - artifactName = "operator-controller-e2e" - pollDuration = time.Minute - pollInterval = time.Second - testCatalogRefEnvVar = "CATALOG_IMG" - testCatalogName = "test-catalog" -) - -func TestClusterExtensionInstallRegistry(t *testing.T) { - type testCase struct { - name string - packageName string - } - for _, tc := range []testCase{ - { - name: "no registry configuration necessary", - packageName: "test", - }, - { - // NOTE: This test requires an extra configuration in /etc/containers/registries.conf, which is mounted - // for this e2e via the ./config/components/e2e/registries-conf kustomize component as part of the e2e component. - // The goal here is to prove that "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" is - // mapped to the "real" registry hostname ("docker-registry.operator-controller-e2e.svc.cluster.local:5000"). 
- name: "package requires mirror registry configuration in /etc/containers/registries.conf", - packageName: "test-mirrored", - }, - } { - t.Run(tc.name, func(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: tc.packageName, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By eventually creating the NetworkPolicy named 'test-operator-network-policy'") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - var np networkingv1.NetworkPolicy - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: "test-operator-network-policy", Namespace: ns.Name}, &np)) - }, pollDuration, pollInterval) - - t.Log("By verifying that no templating occurs for registry+v1 bundle manifests") - cm := corev1.ConfigMap{} - require.NoError(t, c.Get(context.Background(), types.NamespacedName{Namespace: ns.Name, Name: "test-configmap"}, &cm)) - require.Contains(t, cm.Annotations, "shouldNotTemplate") - require.Contains(t, cm.Annotations["shouldNotTemplate"], "{{ $labels.namespace }}") - }) - } -} - -func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { - // NOTE: Like 'TestClusterExtensionInstallRegistry', this test also requires extra configuration in /etc/containers/registries.conf - packageName := "dynamic" - - t.Log("When a cluster extension is installed from a catalog") - 
t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: packageName, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It updates the registries.conf file contents") - cm := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "e2e-registries-conf", - Namespace: "olmv1-system", - }, - Data: map[string]string{ - "registries.conf": `[[registry]] -prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" -location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, - }, - } - require.NoError(t, c.Update(context.Background(), &cm)) - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, 2*time.Minute, pollInterval) - - // Give the check 2 minutes instead of the typical 1 for the pod's - // files to update from the configmap change. - // The theoretical max time is the kubelet sync period of 1 minute + - // ConfigMap cache TTL of 1 minute = 2 minutes - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, 2*time.Minute, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - extraCatalogName := fmt.Sprintf("extra-test-catalog-%s", rand.String(8)) - extraCatalog, err := CreateTestCatalog(context.Background(), extraCatalogName, os.Getenv(testCatalogRefEnvVar)) - require.NoError(t, err) - - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - defer func(cat 
*ocv1.ClusterCatalog) { - require.NoError(t, c.Delete(context.Background(), cat)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - }(extraCatalog) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves to multiple bundle paths") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a failed resolution with multiple bundles") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True and Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - // Catalog names are sorted alphabetically in the error message - catalogs := []string{extensionCatalog.Name, extraCatalog.Name} - slices.Sort(catalogs) - expectedMessage := fmt.Sprintf("in multiple catalogs with the same priority %v", catalogs) - require.Contains(ct, cond.Message, expectedMessage) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - // No Selector since this is an exact version match - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful installation") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - require.Equal(ct, - &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{ - Name: "test-operator.1.0.0", - Version: "1.0.0", - }}, - clusterExtension.Status.Install, - ) - - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It does not allow to upgrade the ClusterExtension to a non-successor version") - t.Log("By updating the ClusterExtension resource to a 
non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting an unsatisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True and Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Equal(ct, "error upgrading from currently installed version \"1.0.0\": no bundles found for package \"test\" matching version \"1.2.0\"", cond.Message) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It allows to upgrade the ClusterExtension to a non-successor version") - t.Log("By updating the ClusterExtension resource to a non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. 
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a satisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It does allow to upgrade the ClusterExtension to any of the successor versions within non-zero major version") - t.Log("By updating the ClusterExtension resource by skipping versions") - // 1.0.1 replaces 1.0.0 in the test catalog - clusterExtension.Spec.Source.Catalog.Version = "1.0.1" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when a catalog is patched with new ImageRef") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchExpressions: 
[]metav1.LabelSelectorRequirement{ - { - Key: "olm.operatorframework.io/metadata.name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{extensionCatalog.Name}, - }, - }, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - // patch imageRef tag on test-catalog image with v2 image - t.Log("By patching the catalog ImageRef to point to the v2 catalog") - updatedCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:v2", os.Getenv("CLUSTER_REGISTRY_HOST")) - err := patchTestCatalog(context.Background(), extensionCatalog.Name, updatedCatalogImage) - require.NoError(t, err) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.Contains(ct, clusterExtension.Status.Install.Bundle.Version, "1.3.0") - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when a new catalog is available") - - // Tag the image with the new tag - var err error - v1Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V1")) - err = crane.Tag(v1Image, latestImageTag, crane.Insecure) - require.NoError(t, err) - - // create a test-catalog with latest image tag - catalogName := fmt.Sprintf("test-catalog-%s", rand.String(8)) - latestCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:latest", os.Getenv("CLUSTER_REGISTRY_HOST")) - extensionCatalog, err := CreateTestCatalog(context.Background(), catalogName, latestCatalogImage) - require.NoError(t, err) - clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8)) - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterExtensionName, - }, - } - ns, err := CreateNamespace(context.Background(), clusterExtensionName) - require.NoError(t, err) - sa, err := 
CreateServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) - require.NoError(t, err) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - // update tag on test-catalog image with v2 image - t.Log("By updating the catalog tag to point to the v2 catalog") - v2Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V2")) - err = crane.Tag(v2Image, latestImageTag, crane.Insecure) - require.NoError(t, err) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when managed content is changed") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It installs the specified package with correct bundle path") - 
t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful installation") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - }, pollDuration, pollInterval) - - t.Log("By deleting a managed resource") - testConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: clusterExtension.Spec.Namespace, - }, - } - require.NoError(t, c.Delete(context.Background(), testConfigMap)) - - t.Log("By eventually re-creating the managed resource") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionRecoversFromNoNamespaceWhenFailureFixed(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - t.Log("By not creating the Namespace and ServiceAccount") - clusterExtension, extensionCatalog := TestInitClusterExtensionClusterCatalog(t) - - defer TestCleanup(t, extensionCatalog, clusterExtension, nil, nil) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: clusterExtension.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: clusterExtension.Name, - }, - } - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting Progressing == True with Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Installed != True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.NotEqual(ct, metav1.ConditionTrue, cond.Status) - }, pollDuration, pollInterval) - - t.Log("By creating the Namespace and ServiceAccount") - sa, ns := TestInitServiceAccountNamespace(t, clusterExtension.Name) - defer TestCleanup(t, nil, nil, sa, ns) - - // NOTE: In order to ensure predictable results we need to 
ensure we have a single - // known failure with a singular fix operation. Additionally, due to the exponential - // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension - // after creating int the Namespace and ServiceAccount. - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True with Reason Success") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionRecoversFromExistingDeploymentWhenFailureFixed(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: clusterExtension.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: clusterExtension.Name, - }, - } - - t.Log("By creating a new Deployment that can not be adopted") - newDeployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-operator", - Namespace: clusterExtension.Name, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test-operator"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test-operator"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Command: []string{"sleep", "1000"}, - Image: "busybox", - ImagePullPolicy: corev1.PullAlways, - Name: "busybox", - SecurityContext: &corev1.SecurityContext{ - RunAsNonRoot: ptr.To(true), - RunAsUser: ptr.To(int64(1000)), - AllowPrivilegeEscalation: ptr.To(false), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - }, - }, - }, - }, - }, - } - require.NoError(t, c.Create(context.Background(), newDeployment)) - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By 
eventually reporting Progressing == True with Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually failing to install the package successfully due to no adoption support") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - // TODO: We probably _should_ be testing the reason here, but helm and boxcutter applier have different reasons. - // Maybe we change helm to use "Absent" rather than "Failed" since the Progressing condition already captures - // the failure? - //require.Equal(ct, ocv1.ReasonFailed, cond.Reason) - require.Contains(ct, cond.Message, "No bundle installed") - }, pollDuration, pollInterval) - - t.Log("By deleting the new Deployment") - require.NoError(t, c.Delete(context.Background(), newDeployment)) - - // NOTE: In order to ensure predictable results we need to ensure we have a single - // known failure with a singular fix operation. Additionally, due to the exponential - // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension - // after deleting the Deployment. - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True with Reason Success") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go deleted file mode 100644 index aa033a2f1e..0000000000 --- a/test/e2e/e2e_suite_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" - 
utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" -) - -var ( - cfg *rest.Config - c client.Client -) - -const ( - testSummaryOutputEnvVar = "E2E_SUMMARY_OUTPUT" - latestImageTag = "latest" -) - -func TestMain(m *testing.M) { - cfg = ctrl.GetConfigOrDie() - - var err error - utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) - c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - utilruntime.Must(err) - - res := m.Run() - path := os.Getenv(testSummaryOutputEnvVar) - if path == "" { - fmt.Printf("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation") - } else { - err = utils.PrintSummary(path) - if err != nil { - // Fail the run if alerts are found - fmt.Printf("%v", err) - os.Exit(1) - } - } - os.Exit(res) -} - -// patchTestCatalog will patch the existing clusterCatalog on the test cluster, provided -// the context, catalog name, and the image reference. It returns an error -// if any errors occurred while updating the catalog. -func patchTestCatalog(ctx context.Context, name string, newImageRef string) error { - // Fetch the existing ClusterCatalog - catalog := &ocv1.ClusterCatalog{} - err := c.Get(ctx, client.ObjectKey{Name: name}, catalog) - if err != nil { - return err - } - - // Update the ImageRef - catalog.Spec.Source.Image.Ref = newImageRef - - // Patch the ClusterCatalog - err = c.Update(ctx, catalog) - if err != nil { - return err - } - - return err -} diff --git a/test/e2e/features/install.feature b/test/e2e/features/install.feature new file mode 100644 index 0000000000..9aebd87612 --- /dev/null +++ b/test/e2e/features/install.feature @@ -0,0 +1,272 @@ +Feature: Install ClusterExtension + + As an OLM user I would like to install a cluster extension from catalog + or get an appropriate information in case of an error. 
+
+  Background:
+    Given OLM is available
+    And "test" catalog serves bundles
+    And Service account "olm-sa" with needed permissions is available in test namespace
+
+  Scenario Outline: Install latest available version from the default channel
+    When ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-sa
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: <package-name>
+            selector:
+              matchLabels:
+                "olm.operatorframework.io/metadata.name": test-catalog
+      """
+    Then ClusterExtension is rolled out
+    And ClusterExtension is available
+    And bundle "<package-name>-operator.1.2.0" is installed in version "1.2.0"
+    And resource "networkpolicy/test-operator-network-policy" is installed
+    And resource "configmap/test-configmap" is installed
+    And resource "deployment/test-operator" is installed
+
+    Examples:
+      | package-name  |
+      | test          |
+      | test-mirrored |
+
+
+  Scenario: Report that a bundle cannot be installed when it exists in multiple catalogs with the same priority
+    Given "extra" catalog serves bundles
+    When ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-sa
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: test
+      """
+    Then ClusterExtension reports Progressing as True with Reason Retrying:
+      """
+      found bundles for package "test" in multiple catalogs with the same priority [extra-catalog test-catalog]
+      """
+
+  @SingleOwnNamespaceInstallSupport
+  Scenario: watchNamespace config is required for extension supporting single namespace
+    Given Service account "olm-admin" in test namespace is cluster admin
+    And resource is applied
+      """
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: single-namespace-operator-target
+      """
+    And ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-admin
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: single-namespace-operator
+            selector:
+              matchLabels:
+                "olm.operatorframework.io/metadata.name": test-catalog
+      """
+    And ClusterExtension reports Progressing as True with Reason Retrying:
+      """
+      error for resolved bundle "single-namespace-operator.1.0.0" with version "1.0.0":
+      invalid ClusterExtension configuration: invalid configuration: required field "watchNamespace" is missing
+      """
+    When ClusterExtension is updated
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-admin
+        config:
+          configType: Inline
+          inline:
+            watchNamespace: single-namespace-operator-target # added
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: single-namespace-operator
+            selector:
+              matchLabels:
+                "olm.operatorframework.io/metadata.name": test-catalog
+      """
+    Then ClusterExtension reports Installed as True
+    And bundle "single-namespace-operator.1.0.0" is installed in version "1.0.0"
+    And operator "single-namespace-operator" target namespace is "single-namespace-operator-target"
+
+  @SingleOwnNamespaceInstallSupport
+  Scenario: watchNamespace config is required for extension supporting own namespace
+    Given Service account "olm-admin" in test namespace is cluster admin
+    And ClusterExtension is applied
+      """
+      apiVersion:
olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying: + """ + error for resolved bundle "own-namespace-operator.1.0.0" with version + "1.0.0": invalid ClusterExtension configuration: invalid configuration: required + field "watchNamespace" is missing + """ + And ClusterExtension is updated + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: some-ns # added, but not own namespace + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying: + """ + error for resolved bundle "own-namespace-operator.1.0.0" with version + "1.0.0": invalid ClusterExtension configuration: invalid configuration: 'some-ns' + is not valid ownNamespaceInstallMode: invalid value "some-ns": watchNamespace + must be "$TEST_NAMESPACE" (the namespace where the operator is installed) because this + operator only supports OwnNamespace install mode + """ + When ClusterExtension is updated + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: $TEST_NAMESPACE # own namespace + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And operator "own-namespace-operator" target namespace is "$TEST_NAMESPACE" + + @WebhookProviderCertManager + Scenario: Install operator having webhooks + Given Service account "olm-admin" in test namespace is cluster admin + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + packageName: webhook-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And resource apply fails with error msg containing "Invalid value: false: Spec.Valid must be true" + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: $NAME + namespace: $TEST_NAMESPACE + spec: + valid: false # webhook rejects it as invalid value + """ + And resource is applied + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: $NAME + namespace: $TEST_NAMESPACE + spec: + valid: true + """ + And resource "webhooktest/$NAME" matches + """ + apiVersion: webhook.operators.coreos.io/v2 + kind: WebhookTest + metadata: + name: $NAME + namespace: $TEST_NAMESPACE + spec: + conversion: + valid: true + mutate: true + """ + And resource "webhooktest.v1.webhook.operators.coreos.io/$NAME" matches + """ + apiVersion: 
webhook.operators.coreos.io/v1
+      kind: WebhookTest
+      metadata:
+        name: $NAME
+        namespace: $TEST_NAMESPACE
+      spec:
+        valid: true
+        mutate: true
+      """
\ No newline at end of file
diff --git a/test/e2e/features/metrics.feature b/test/e2e/features/metrics.feature
new file mode 100644
index 0000000000..fb25581360
--- /dev/null
+++ b/test/e2e/features/metrics.feature
@@ -0,0 +1,14 @@
+Feature: Expose various metrics
+
+  Background:
+    Given OLM is available
+
+  Scenario Outline: component exposes metrics
+    Given Service account "metrics-reader" in test namespace has permissions to fetch "<component>" metrics
+    When Service account "metrics-reader" sends request to "/metrics" endpoint of "<component>" service
+    Then Prometheus metrics are returned in the response
+
+    Examples:
+      | component           |
+      | operator-controller |
+      | catalogd            |
\ No newline at end of file
diff --git a/test/e2e/features/recover.feature b/test/e2e/features/recover.feature
new file mode 100644
index 0000000000..b8671ac546
--- /dev/null
+++ b/test/e2e/features/recover.feature
@@ -0,0 +1,117 @@
+Feature: Recover cluster extension from errors that might occur during its lifetime
+
+  Background:
+    Given OLM is available
+    And "test" catalog serves bundles
+
+
+  Scenario: Restore removed resource
+    Given Service account "olm-sa" with needed permissions is available in test namespace
+    And ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-sa
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: test
+            selector:
+              matchLabels:
+                "olm.operatorframework.io/metadata.name": test-catalog
+      """
+    And ClusterExtension is available
+    And resource "configmap/test-configmap" is available
+    When resource "configmap/test-configmap" is removed
+    Then resource "configmap/test-configmap" is eventually restored
+
+  Scenario: Install ClusterExtension after target namespace becomes available
+    Given ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-sa
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: test
+            selector:
+              matchLabels:
+                "olm.operatorframework.io/metadata.name": test-catalog
+      """
+    And ClusterExtension reports Progressing as True with Reason Retrying
+    When Service account "olm-sa" with needed permissions is available in test namespace
+    Then ClusterExtension is available
+    And ClusterExtension reports Progressing as True with Reason Succeeded
+
+  Scenario: Install ClusterExtension after conflicting resource is removed
+    Given Service account "olm-sa" with needed permissions is available in test namespace
+    And resource is applied
+      """
+      apiVersion: apps/v1
+      kind: Deployment
+      metadata:
+        name: test-operator
+        namespace: $TEST_NAMESPACE
+      spec:
+        replicas: 1
+        selector:
+          matchLabels:
+            app: test-operator
+        template:
+          metadata:
+            labels:
+              app: test-operator
+          spec:
+            containers:
+              - command:
+                  - "sleep"
+                args:
+                  - "1000"
+                image: busybox:1.36
+                imagePullPolicy: IfNotPresent
+                name: busybox
+                securityContext:
+                  runAsNonRoot: true
+                  runAsUser: 1000
+                  allowPrivilegeEscalation: false
+                  capabilities:
+                    drop:
+                      - ALL
+                  seccompProfile:
+                    type: RuntimeDefault
+      """
+    And ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: $NAME
+      spec:
+        namespace: $TEST_NAMESPACE
+        serviceAccount:
+          name: olm-sa
+
source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying + And ClusterExtension reports Installed as False + When resource "deployment/test-operator" is removed + Then ClusterExtension is available + And ClusterExtension reports Progressing as True with Reason Succeeded + And ClusterExtension reports Installed as True diff --git a/test/e2e/features/update.feature b/test/e2e/features/update.feature new file mode 100644 index 0000000000..ebbfdb91ae --- /dev/null +++ b/test/e2e/features/update.feature @@ -0,0 +1,247 @@ +Feature: Update ClusterExtension + + As an OLM user I would like to update a ClusterExtension from a catalog + or get an appropriate information in case of an error. + + Background: + Given OLM is available + And "test" catalog serves bundles + And Service account "olm-sa" with needed permissions is available in test namespace + + Scenario: Update to a successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.1 + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.0.1" is installed in version "1.0.1" + + Scenario: Cannot update extension to non successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + """ + Then ClusterExtension reports Progressing as True with Reason Retrying: + """ + error upgrading from currently installed version "1.0.0": no bundles found for package "test" matching version "1.2.0" + """ + + Scenario: Force update to non successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And 
ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + upgradeConstraintPolicy: SelfCertified + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + + Scenario: Auto update when new version becomes available in the new catalog image ref + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + When "test" catalog is updated to version "v2" + Then bundle "test-operator.1.3.0" is installed in version "1.3.0" + + Scenario: Auto update when new version becomes available in the same catalog image ref + Given "test" catalog image version "v1" is also tagged as "latest" + And "test" catalog is updated to version "latest" + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + When "test" catalog image version "v2" is also tagged as "latest" + Then bundle "test-operator.1.3.0" is installed in version "1.3.0" + + @BoxcutterRuntime + Scenario: Each update creates a new revision + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: $NAME + spec: + namespace: $TEST_NAMESPACE + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + upgradeConstraintPolicy: SelfCertified + """ + Then bundle "test-operator.1.2.0" is installed in version "1.2.0" + And ClusterExtension is rolled out + And ClusterExtension is available + And resource "clusterextensionrevision/$NAME-1" is available + And resource "clusterextensionrevision/$NAME-2" is available diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go new file mode 100644 index 0000000000..7ef8750092 --- /dev/null +++ b/test/e2e/features_test.go @@ -0,0 +1,81 @@ 
+package e2e + +import ( + //"context" + "fmt" + "log" + "os" + "testing" + + "github.com/cucumber/godog" + "github.com/cucumber/godog/colors" + "github.com/spf13/pflag" + ctrl "sigs.k8s.io/controller-runtime" + //ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" + "github.com/operator-framework/operator-controller/test/e2e/steps" +) + +var opts = godog.Options{ + Format: "pretty", + Paths: []string{"features"}, + Output: colors.Colored(os.Stdout), + Concurrency: 1, +} + +var logOpts = zap.Options{} + +func init() { + flagSet := pflag.CommandLine + flagSet.BoolVar(&logOpts.Development, "log.debug", false, "print debug log level") + godog.BindCommandLineFlags("godog.", &opts) +} + +func TestMain(m *testing.M) { + // parse CLI arguments + pflag.Parse() + opts.Paths = pflag.Args() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&logOpts))) + + //opts.DefaultContext = ctrl.LoggerInto(context.Background(), ctrllog.Log) + // run tests + sc := godog.TestSuite{ + TestSuiteInitializer: InitializeSuite, + ScenarioInitializer: InitializeScenario, + Options: &opts, + }.Run() + switch sc { + // 0 - success + case 0: + + path := os.Getenv("E2E_SUMMARY_OUTPUT") + if path == "" { + fmt.Printf("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation\n") + } else { + if err := utils.PrintSummary(path); err != nil { + // Fail the run if alerts are found + fmt.Printf("%v", err) + os.Exit(1) + } + } + return + + // 1 - failed + // 2 - command line usage error + // 128 - or higher, os signal related error exit codes + default: + log.Fatalf("non-zero status returned (%d), failed to run feature tests", sc) + } +} + +func InitializeSuite(tc *godog.TestSuiteContext) { + tc.BeforeSuite(steps.DetectEnabledFeatureGates) +} + +func InitializeScenario(sc *godog.ScenarioContext) { + steps.RegisterSteps(sc) + steps.RegisterHooks(sc) +} diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go deleted file mode 100644 index e41827987d..0000000000 --- a/test/e2e/metrics_test.go +++ /dev/null @@ -1,253 +0,0 @@ -// Package e2e contains end-to-end tests to verify that the metrics endpoints -// for both components. Metrics are exported and accessible by authorized users through -// RBAC and ServiceAccount tokens. -// -// These tests perform the following steps: -// 1. Create a ClusterRoleBinding to grant necessary permissions for accessing metrics. -// 2. Generate a ServiceAccount token for authentication. -// 3. Deploy a curl pod to interact with the metrics endpoint. -// 4. Wait for the curl pod to become ready. -// 5. Execute a curl command from the pod to validate the metrics endpoint. -// 6. Clean up all resources created during the test, such as the ClusterRoleBinding and curl pod. 
-// -//nolint:gosec -package e2e - -import ( - "bytes" - "context" - "fmt" - "io" - "os/exec" - "testing" - "time" - - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/rand" - - utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" -) - -// TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller -func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { - client := utils.FindK8sClient(t) - curlNamespace := createRandomNamespace(t, client) - componentNamespace := getComponentNamespace(t, client, "control-plane=operator-controller-controller-manager") - metricsURL := fmt.Sprintf("https://operator-controller-service.%s.svc.cluster.local:8443/metrics", componentNamespace) - - config := NewMetricsTestConfig( - client, - curlNamespace, - "operator-controller-metrics-reader", - "operator-controller-metrics-binding", - "operator-controller-metrics-reader", - "oper-curl-metrics", - metricsURL, - ) - - config.run(t) -} - -// TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd -func TestCatalogdMetricsExportedEndpoint(t *testing.T) { - client := utils.FindK8sClient(t) - curlNamespace := createRandomNamespace(t, client) - componentNamespace := getComponentNamespace(t, client, "control-plane=catalogd-controller-manager") - metricsURL := fmt.Sprintf("https://catalogd-service.%s.svc.cluster.local:7443/metrics", componentNamespace) - - config := NewMetricsTestConfig( - client, - curlNamespace, - "catalogd-metrics-reader", - "catalogd-metrics-binding", - "catalogd-metrics-reader", - "catalogd-curl-metrics", - metricsURL, - ) - - config.run(t) -} - -// MetricsTestConfig holds the necessary configurations for testing metrics endpoints. -type MetricsTestConfig struct { - client string - namespace string - clusterRole string - clusterBinding string - serviceAccount string - curlPodName string - metricsURL string -} - -// NewMetricsTestConfig initializes a new MetricsTestConfig. 
-func NewMetricsTestConfig(client, namespace, clusterRole, clusterBinding, serviceAccount, curlPodName, metricsURL string) *MetricsTestConfig { - return &MetricsTestConfig{ - client: client, - namespace: namespace, - clusterRole: clusterRole, - clusterBinding: clusterBinding, - serviceAccount: serviceAccount, - curlPodName: curlPodName, - metricsURL: metricsURL, - } -} - -// run will execute all steps of those tests -func (c *MetricsTestConfig) run(t *testing.T) { - defer c.cleanup(t) - - c.createMetricsClusterRoleBinding(t) - token := c.getServiceAccountToken(t) - c.createCurlMetricsPod(t) - c.validate(t, token) -} - -// createMetricsClusterRoleBinding to binding and expose the metrics -func (c *MetricsTestConfig) createMetricsClusterRoleBinding(t *testing.T) { - t.Logf("Creating ClusterRoleBinding %s for %s in namespace %s", c.clusterBinding, c.serviceAccount, c.namespace) - cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding, - "--clusterrole="+c.clusterRole, - "--serviceaccount="+c.namespace+":"+c.serviceAccount) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating ClusterRoleBinding: %s", string(output)) -} - -// getServiceAccountToken return the token requires to have access to the metrics -func (c *MetricsTestConfig) getServiceAccountToken(t *testing.T) string { - t.Logf("Creating ServiceAccount %q in namespace %q", c.serviceAccount, c.namespace) - output, err := exec.Command(c.client, "create", "serviceaccount", c.serviceAccount, "--namespace="+c.namespace).CombinedOutput() - require.NoError(t, err, "Error creating service account: %v", string(output)) - - t.Logf("Generating ServiceAccount token for %q in namespace %q", c.serviceAccount, c.namespace) - cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "--namespace", c.namespace) - tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd) - require.NoError(t, err, "Error creating token: %s", string(tokenCombinedOutput)) - return string(bytes.TrimSpace(tokenOutput)) -} - -// createCurlMetricsPod creates the Pod with curl image to allow check if the metrics are working -func (c *MetricsTestConfig) createCurlMetricsPod(t *testing.T) { - t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName) - cmd := exec.Command(c.client, "run", c.curlPodName, - "--image=quay.io/curl/curl:8.15.0", - "--namespace", c.namespace, - "--restart=Never", - "--overrides", `{ - "spec": { - "terminationGradePeriodSeconds": 0, - "containers": [{ - "name": "curl", - "image": "quay.io/curl/curl:8.15.0", - "command": ["sh", "-c", "sleep 3600"], - "securityContext": { - "allowPrivilegeEscalation": false, - "capabilities": {"drop": ["ALL"]}, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": {"type": "RuntimeDefault"} - } - }], - "serviceAccountName": "`+c.serviceAccount+`" - } - }`) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating curl pod: %s", string(output)) -} - -// validate verifies if is possible to access the metrics -func (c *MetricsTestConfig) validate(t *testing.T, token string) { - t.Log("Waiting for the curl pod to be ready") - waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "--namespace", c.namespace, "--timeout=60s") - waitOutput, waitErr := waitCmd.CombinedOutput() - require.NoError(t, waitErr, "Error waiting for curl pod to be ready: %s", string(waitOutput)) - - t.Log("Validating the metrics endpoint") - curlCmd := exec.Command(c.client, "exec", 
c.curlPodName, "--namespace", c.namespace, "--", - "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, c.metricsURL) - output, err := curlCmd.CombinedOutput() - require.NoError(t, err, "Error calling metrics endpoint: %s", string(output)) - require.Contains(t, string(output), "200 OK", "Metrics endpoint did not return 200 OK") -} - -// cleanup removes the created resources. Uses a context with timeout to prevent hangs. -func (c *MetricsTestConfig) cleanup(t *testing.T) { - type objDesc struct { - resourceName string - name string - namespace string - } - objects := []objDesc{ - {"clusterrolebinding", c.clusterBinding, ""}, - {"pod", c.curlPodName, c.namespace}, - {"serviceaccount", c.serviceAccount, c.namespace}, - {"namespace", c.namespace, ""}, - } - - t.Log("Cleaning up resources") - for _, obj := range objects { - args := []string{"delete", obj.resourceName, obj.name, "--ignore-not-found=true", "--force"} - if obj.namespace != "" { - args = append(args, "--namespace", obj.namespace) - } - output, err := exec.Command(c.client, args...).CombinedOutput() - require.NoError(t, err, "Error deleting %q %q in namespace %q: %v", obj.resourceName, obj.name, obj.namespace, string(output)) - } - - // Create a context with a 60-second timeout. - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - for _, obj := range objects { - err := waitForDeletion(ctx, c.client, obj.resourceName, obj.name, obj.namespace) - require.NoError(t, err, "Error deleting %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace) - t.Logf("Successfully deleted %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace) - } -} - -// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted -// or until the 60-second timeout is reached. -func waitForDeletion(ctx context.Context, client, resourceType, resourceName, resourceNamespace string) error { - args := []string{"wait", "--for=delete", "--timeout=60s", resourceType, resourceName} - if resourceNamespace != "" { - args = append(args, "--namespace", resourceNamespace) - } - cmd := exec.CommandContext(ctx, client, args...) 
- output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output)) - } - return nil -} - -// createRandomNamespace creates a random namespace -func createRandomNamespace(t *testing.T, client string) string { - nsName := fmt.Sprintf("testns-%s", rand.String(8)) - - cmd := exec.Command(client, "create", "namespace", nsName) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating namespace: %s", string(output)) - - return nsName -} - -// getComponentNamespace returns the namespace where operator-controller or catalogd is running -func getComponentNamespace(t *testing.T, client, selector string) string { - cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error determining namespace: %s", string(output)) - - namespace := string(bytes.TrimSpace(output)) - if namespace == "" { - t.Fatal("No namespace found for selector " + selector) - } - return namespace -} - -func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) { - var outOnly, outAndErr bytes.Buffer - allWriter := io.MultiWriter(&outOnly, &outAndErr) - cmd.Stdout = allWriter - cmd.Stderr = &outAndErr - err := cmd.Run() - return outOnly.Bytes(), outAndErr.Bytes(), err -} diff --git a/test/e2e/network_policy_test.go b/test/e2e/network_policy_test.go index 00143df416..1c1453c30b 100644 --- a/test/e2e/network_policy_test.go +++ b/test/e2e/network_policy_test.go @@ -1,20 +1,27 @@ package e2e import ( + "bytes" "context" "fmt" + "os/exec" "strings" "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" ) @@ -28,6 +35,11 @@ const ( operatorControllerMetricsPort = 8443 ) +var ( + cfg *rest.Config + c client.Client +) + type portWithJustification struct { port []networkingv1.NetworkPolicyPort justification string @@ -377,3 +389,25 @@ func validateSingleIngressRule(t *testing.T, policyName string, clusterIngressRu require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterIngressRule.Ports, "Policy %q, Ingress Rule: 'Ports' mismatch (aggregated from PortWithJustification). 
Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterIngressRule.Ports) } + +// getComponentNamespace returns the namespace where operator-controller or catalogd is running +func getComponentNamespace(t *testing.T, client, selector string) string { + cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") //nolint:gosec // just gathering pods for a given selector + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Error determining namespace: %s", string(output)) + + namespace := string(bytes.TrimSpace(output)) + if namespace == "" { + t.Fatal("No namespace found for selector " + selector) + } + return namespace +} + +func init() { + cfg = ctrl.GetConfigOrDie() + + var err error + utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) + c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + utilruntime.Must(err) +} diff --git a/test/e2e/single_namespace_support_test.go b/test/e2e/single_namespace_support_test.go deleted file mode 100644 index 2c3b825a1a..0000000000 --- a/test/e2e/single_namespace_support_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" - . 
"github.com/operator-framework/operator-controller/test/helpers" -) - -const ( - soNsFlag = "SingleOwnNamespaceInstallSupport" -) - -func TestClusterExtensionSingleNamespaceSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("Test support for cluster extension config") - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating install namespace, watch namespace and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - watchNamespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-target", - }, - } - require.NoError(t, c.Create(t.Context(), &watchNamespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &watchNamespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the test-catalog ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By attempting to install the single-namespace-operator ClusterExtension without any configuration") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "single-namespace-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: 
namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for single-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`) - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, watchNamespace.GetName())), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for single-namespace-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By ensuring the single-namespace-operator deployment is correctly configured to watch the watch namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "single-namespace-operator"}, deployment)) - require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) - require.Equal(ct, watchNamespace.GetName(), deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionOwnNamespaceSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("Test support for cluster extension with OwnNamespace install mode support") - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating install namespace, watch namespace and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) 
- t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the test-catalog ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By attempting to install the own-namespace-operator ClusterExtension without any configuration") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "own-namespace-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for own-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`) - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace other than the install namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ 
- ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(`{"watchNamespace": "some-namespace"}`), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for own-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, "invalid ClusterExtension configuration") - require.Contains(ct, cond.Message, fmt.Sprintf("watchNamespace must be \"%s\"", clusterExtension.Spec.Namespace)) - require.Contains(ct, cond.Message, "OwnNamespace install mode") - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace = install namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, clusterExtension.Spec.Namespace)), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for own-namespace-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By ensuring the own-namespace-operator deployment is correctly configured to watch the watch namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "own-namespace-operator"}, deployment)) - require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) - require.Equal(ct, clusterExtension.Spec.Namespace, deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionVersionUpdate(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, 
- Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It allows to upgrade the ClusterExtension to a non-successor version") - t.Log("By forcing update of ClusterExtension resource to a non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a satisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - t.Log("We should have two ClusterExtensionRevision resources") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - cerList := &ocv1.ClusterExtensionRevisionList{} - require.NoError(ct, c.List(context.Background(), cerList)) - require.Len(ct, cerList.Items, 2) - }, pollDuration, pollInterval) -} diff --git a/test/e2e/steps/hooks.go b/test/e2e/steps/hooks.go new file mode 100644 index 0000000000..e426aa0982 --- /dev/null +++ b/test/e2e/steps/hooks.go @@ -0,0 +1,122 @@ +package steps + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + + "github.com/cucumber/godog" + "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +type resource struct { + name string + kind string +} + +type scenarioContext struct { + id string + namespace string + clusterExtensionName string + addedResources []resource + removedResources []unstructured.Unstructured + backGroundCmds []*exec.Cmd + metricsResponse string +} + +type contextKey string + +const ( + scenarioContextKey contextKey = "scenario-context" +) + +var featureGates = map[string]bool{ + "WebhookProviderCertManager": true, +} + +func RegisterHooks(sc *godog.ScenarioContext) { + sc.Before(CheckFeatureTags) + sc.Before(CreateScenarioContext) + + sc.After(ScenarioCleanup) +} + +func DetectEnabledFeatureGates() { + raw, err := kubectl("get", "deployment", "-n", olmNamespace, olmDeploymentName, "-o", "json") + if err != nil { + return + } + d := &v1.Deployment{} + if err := json.Unmarshal([]byte(raw), d); err != nil { + return + } + + featureGatePattern := regexp.MustCompile(`--feature-gates=([[:alnum:]]+)=(true|false)`) + for _, c := range d.Spec.Template.Spec.Containers { + if c.Name == "manager" { + for _, arg := range c.Args { + if matches := featureGatePattern.FindStringSubmatch(arg); matches != nil { + v, _ := strconv.ParseBool(matches[2]) + featureGates[matches[1]] = v + } + } + } 
+ } + logger.Info(fmt.Sprintf("Enabled feature gates: %v", featureGates)) +} + +func CheckFeatureTags(ctx context.Context, sc *godog.Scenario) (context.Context, error) { + for _, tag := range sc.Tags { + if enabled, found := featureGates[tag.Name[1:]]; !found || (found && !enabled) { + logger.V(1).Info(fmt.Sprintf("Skipping scenario %q because feature gate %q is disabled", sc.Name, tag.Name[1:])) + return ctx, godog.ErrSkip + } + } + return ctx, nil +} + +func CreateScenarioContext(ctx context.Context, sc *godog.Scenario) (context.Context, error) { + scCtx := &scenarioContext{ + id: sc.Id, + namespace: fmt.Sprintf("ns-%s", sc.Id), + clusterExtensionName: fmt.Sprintf("ce-%s", sc.Id), + } + return context.WithValue(ctx, scenarioContextKey, scCtx), nil +} + +func scenarioCtx(ctx context.Context) *scenarioContext { + return ctx.Value(scenarioContextKey).(*scenarioContext) +} + +func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context.Context, error) { + sc := scenarioCtx(ctx) + for _, p := range sc.backGroundCmds { + p.Process.Kill() // nolint: errcheck // we don't care about the error here, we just want to kill the process + p.Process.Wait() // nolint: errcheck // same as above, we just want to wait for the process to exit, and do not want to fail the test if it does not + } + if err != nil { + return ctx, err + } + + forDeletion := []resource{} + if sc.clusterExtensionName != "" { + forDeletion = append(forDeletion, resource{name: sc.clusterExtensionName, kind: "clusterextension"}) + } + forDeletion = append(forDeletion, sc.addedResources...) + forDeletion = append(forDeletion, resource{name: sc.namespace, kind: "namespace"}) + for _, r := range forDeletion { + if _, err := kubectl("delete", r.kind, r.name, "-n", sc.namespace); err != nil { + logger.Info("Error deleting resource", "name", r.name, "namespace", sc.namespace, "stderr", string(func() *exec.ExitError { + target := &exec.ExitError{} + _ = errors.As(err, &target) + return target + }().Stderr)) + } + } + return ctx, nil +} diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go new file mode 100644 index 0000000000..399234352a --- /dev/null +++ b/test/e2e/steps/steps.go @@ -0,0 +1,531 @@ +package steps + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/cucumber/godog" + jsonpatch "github.com/evanphx/json-patch" + "github.com/google/go-cmp/cmp" + "github.com/google/go-containerregistry/pkg/crane" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" +) + +const ( + olmNamespace = "olmv1-system" + olmDeploymentName = "operator-controller-controller-manager" + timeout = 300 * time.Second + tick = 1 * time.Second +) + +var kubeconfigPath string + +func RegisterSteps(sc *godog.ScenarioContext) { + sc.Step(`^OLM is available$`, OLMisAvailable) + sc.Step(`^bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled) + sc.Step(`^ClusterExtension is applied$`, ResourceIsApplied) + sc.Step(`^ClusterExtension is updated$`, ResourceIsApplied) + sc.Step(`^ClusterExtension is available$`, ClusterExtensionIsAvailable) + sc.Step(`^ClusterExtension is rolled out$`, ClusterExtensionIsRolledOut) + sc.Step(`^ClusterExtension reports 
([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+):$`, ClusterExtensionReportsCondition) + sc.Step(`^ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutMsg) + sc.Step(`^ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutReason) + sc.Step(`^resource "([^"]+)" is installed$`, ResourceAvailable) + sc.Step(`^resource "([^"]+)" is available$`, ResourceAvailable) + sc.Step(`^resource "([^"]+)" is removed$`, ResourceRemoved) + sc.Step(`^resource is applied$`, ResourceIsApplied) + sc.Step(`^resource apply fails with error msg containing "([^"]+)"$`, ResourceApplyFails) + sc.Step(`^resource "([^"]+)" is eventually restored$`, ResourceRestored) + sc.Step(`^resource "([^"]+)" matches$`, ResourceMatches) + sc.Step(`^Service account "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace) + sc.Step(`^Service account "([^"]*)" in test namespace is cluster admin$`, ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace) + sc.Step(`^Service account "([^"]+)" in test namespace has permissions to fetch "([^"]+)" metrics$`, ServiceAccountWithFetchMetricsPermissions) + sc.Step(`^Service account "([^"]+)" sends request to "([^"]+)" endpoint of "([^"]+)" service$`, SendMetricsRequest) + sc.Step(`^"([^"]+)" catalog is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion) + sc.Step(`^"([^"]+)" catalog serves bundles$`, CatalogServesBundles) + sc.Step(`^"([^"]+)" catalog image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage) + sc.Step(`^operator "([^"]+)" target namespace is "([^"]+)"$`, OperatorTargetNamespace) + sc.Step(`^Prometheus metrics are returned in the response$`, PrometheusMetricsAreReturned) +} + +func init() { + kubeconfigPath = os.Getenv("HOME") + "/.kube/config" +} + +var ( + logger = log.Log +) + +func kubectl(args ...string) (string, error) { + cmd := exec.Command("kubectl", args...) + logger.V(1).Info(strings.Join(cmd.Args, " ")) + cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + b, err := cmd.Output() + return string(b), err +} + +func kubectlWithInput(yaml string, args ...string) (string, error) { + cmd := exec.Command("kubectl", args...) 
+ cmd.Stdin = bytes.NewBufferString(yaml) + cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + b, err := cmd.Output() + return string(b), err +} + +func OLMisAvailable(ctx context.Context) error { + require.Eventually(godog.T(ctx), func() bool { + v, err := kubectl("get", "deployment", "-n", olmNamespace, olmDeploymentName, "-o", "jsonpath='{.status.conditions[?(@.type==\"Available\")].status}'") + if err != nil { + return false + } + return v == "'True'" + }, timeout, tick) + return nil +} + +func BundleInstalled(ctx context.Context, name, version string) error { + sc := scenarioCtx(ctx) + waitFor(ctx, func() bool { + v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}") + if err != nil { + return false + } + var bundle map[string]interface{} + if err := json.Unmarshal([]byte(v), &bundle); err != nil { + return false + } + return bundle["name"] == name && bundle["version"] == version + }) + return nil +} + +func toUnstructured(yamlContent string) (*unstructured.Unstructured, error) { + var u map[string]any + if err := yaml.Unmarshal([]byte(yamlContent), &u); err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: u}, nil +} + +func substituteScenarioVars(content string, sc *scenarioContext) string { + result := strings.ReplaceAll(content, "$TEST_NAMESPACE", sc.namespace) + result = strings.ReplaceAll(result, "$NAME", sc.clusterExtensionName) + return result +} + +func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + yamlContent := substituteScenarioVars(yamlTemplate.Content, sc) + _, err := toUnstructured(yamlContent) + if err != nil { + return fmt.Errorf("failed to parse resource yaml: %v", err) + } + waitFor(ctx, func() bool { + _, err := kubectlWithInput(yamlContent, "apply", "-f", "-") + if err == nil { + return false + //return fmt.Errorf("expected apply to fail, got: %s", out) + } + if stdErr := string(func() *exec.ExitError { + target := &exec.ExitError{} + _ = errors.As(err, &target) + return target + }().Stderr); !strings.Contains(stdErr, errMsg) { + return false + //return fmt.Errorf("expected error message %s to be in stderr, got: %s", errMsg, stdErr) + } + return true + }) + return nil +} + +func ResourceIsApplied(ctx context.Context, yamlTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + yamlContent := substituteScenarioVars(yamlTemplate.Content, sc) + res, err := toUnstructured(yamlContent) + if err != nil { + return fmt.Errorf("failed to parse resource yaml: %v", err) + } + out, err := kubectlWithInput(yamlContent, "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply resource %v %w", out, err) + } + if res.GetKind() == "ClusterExtension" { + sc.clusterExtensionName = res.GetName() + } + return nil +} + +func ClusterExtensionIsAvailable(ctx context.Context) error { + sc := scenarioCtx(ctx) + require.Eventually(godog.T(ctx), func() bool { + v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.conditions[?(@.type==\"Installed\")].status}") + if err != nil { + return false + } + return v == "True" + }, timeout, tick) + return nil +} + +func ClusterExtensionIsRolledOut(ctx context.Context) error { + sc := scenarioCtx(ctx) + require.Eventually(godog.T(ctx), func() bool { + v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.conditions[?(@.type==\"Progressing\")]}") + if err != nil { + 
return false
+		}
+
+		var condition map[string]interface{}
+		if err := json.Unmarshal([]byte(v), &condition); err != nil {
+			return false
+		}
+		return condition["status"] == "True" && condition["reason"] == "Succeeded" && condition["type"] == "Progressing"
+	}, timeout, tick)
+	return nil
+}
+
+func waitFor(ctx context.Context, conditionFn func() bool) {
+	require.Eventually(godog.T(ctx), conditionFn, timeout, tick)
+}
+
+func waitForExtensionCondition(ctx context.Context, conditionType, conditionStatus string, conditionReason *string, msg *string) error {
+	sc := scenarioCtx(ctx)
+	require.Eventually(godog.T(ctx), func() bool {
+		v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", fmt.Sprintf("jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType))
+		if err != nil {
+			return false
+		}
+
+		var condition map[string]interface{}
+		if err := json.Unmarshal([]byte(v), &condition); err != nil {
+			return false
+		}
+		if condition["status"] != conditionStatus {
+			return false
+		}
+		if conditionReason != nil && condition["reason"] != *conditionReason {
+			return false
+		}
+		if msg != nil && condition["message"] != *msg {
+			return false
+		}
+
+		return true
+	}, timeout, tick)
+	return nil
+}
+
+func ClusterExtensionReportsCondition(ctx context.Context, conditionType, conditionStatus, conditionReason string, msg *godog.DocString) error {
+	var conditionMsg *string
+	if msg != nil {
+		conditionMsg = ptr.To(substituteScenarioVars(strings.Join(strings.Fields(msg.Content), " "), scenarioCtx(ctx)))
+	}
+	return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, conditionMsg)
+}
+
+func ClusterExtensionReportsConditionWithoutMsg(ctx context.Context, conditionType, conditionStatus, conditionReason string) error {
+	return ClusterExtensionReportsCondition(ctx, conditionType, conditionStatus, conditionReason, nil)
+}
+
+func ClusterExtensionReportsConditionWithoutReason(ctx context.Context, conditionType, conditionStatus string) error {
+	return waitForExtensionCondition(ctx, conditionType, conditionStatus, nil, nil)
+}
+
+func ResourceAvailable(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	resource = substituteScenarioVars(resource, sc)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	waitFor(ctx, func() bool {
+		_, err := kubectl("get", rtype, name, "-n", sc.namespace)
+		return err == nil
+	})
+	return nil
+}
+
+func ResourceRemoved(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	yaml, err := kubectl("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+	if err != nil {
+		return err
+	}
+	obj, err := toUnstructured(yaml)
+	if err != nil {
+		return err
+	}
+	sc.removedResources = append(sc.removedResources, *obj)
+	_, err = kubectl("delete", rtype, name, "-n", sc.namespace)
+	return err
+}
+
+func ResourceMatches(ctx context.Context, resource string, requiredContentTemplate *godog.DocString) error {
+	sc := scenarioCtx(ctx)
+	resource = substituteScenarioVars(resource, sc)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	requiredContent, err := toUnstructured(substituteScenarioVars(requiredContentTemplate.Content, sc))
+	if err != nil {
+		return fmt.Errorf("failed to parse required resource yaml: %v", err)
+	}
+	waitFor(ctx, func() bool {
+		objJson, err := kubectl("get", rtype, name, "-n", sc.namespace, "-o", "json")
+		if err != nil {
+			return false
+		}
+		obj, err := toUnstructured(objJson)
+		if err != nil {
+			return false
+		}
+		patch, err := json.Marshal(requiredContent.Object)
+		if err != nil {
+			return false
+		}
+		updJson, err := jsonpatch.MergePatch([]byte(objJson), patch)
+		if err != nil {
+			return false
+		}
+		upd, err := toUnstructured(string(updJson))
+		if err != nil {
+			return false
+		}
+
+		return len(cmp.Diff(upd.Object, obj.Object)) == 0
+	})
+	return nil
+}
+
+func ResourceRestored(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	waitFor(ctx, func() bool {
+		yaml, err := kubectl("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+		if err != nil {
+			return false
+		}
+		obj, err := toUnstructured(yaml)
+		if err != nil {
+			return false
+		}
+		ct := obj.GetCreationTimestamp()
+
+		for i, removed := range sc.removedResources {
+			rct := removed.GetCreationTimestamp()
+			if removed.GetName() == obj.GetName() && removed.GetKind() == obj.GetKind() && rct.Before(&ct) {
+				switch rtype {
+				case "configmap":
+					if !reflect.DeepEqual(removed.Object["data"], obj.Object["data"]) {
+						return false
+					}
+				default:
+					if !reflect.DeepEqual(removed.Object["spec"], obj.Object["spec"]) {
+						return false
+					}
+				}
+				sc.removedResources = append(sc.removedResources[:i], sc.removedResources[i+1:]...)
+				return true
+			}
+		}
+		return false
+	})
+	return nil
+}
+
+func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacTemplate string, keyValue ...string) error {
+	sc := scenarioCtx(ctx)
+	yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", rbacTemplate))
+	if err != nil {
+		return fmt.Errorf("failed to read RBAC template yaml: %v", err)
+	}
+
+	// Replace template variables
+	yaml := string(yamlContent)
+	yaml = strings.ReplaceAll(yaml, "{namespace}", sc.namespace)
+	yaml = strings.ReplaceAll(yaml, "{serviceaccount_name}", serviceAccount)
+	yaml = strings.ReplaceAll(yaml, "{clusterextension_name}", sc.clusterExtensionName)
+	if len(keyValue) > 0 {
+		for i := 0; i < len(keyValue); i += 2 {
+			yaml = strings.ReplaceAll(yaml, fmt.Sprintf("{%s}", keyValue[i]), keyValue[i+1])
+		}
+	}
+
+	// Apply the RBAC configuration
+	_, err = kubectlWithInput(yaml, "apply", "-f", "-")
+	if err != nil {
+		return fmt.Errorf("failed to apply RBAC configuration: %v", err)
+	}
+
+	return nil
+}
+
+func ServiceAccountWithNeededPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error {
+	return applyPermissionsToServiceAccount(ctx, serviceAccount, "rbac-template.yaml")
+}
+
+func ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error {
+	return applyPermissionsToServiceAccount(ctx, serviceAccount, "cluster-admin-rbac-template.yaml")
+}
+
+func ServiceAccountWithFetchMetricsPermissions(ctx context.Context, serviceAccount string, controllerName string) error {
+	return applyPermissionsToServiceAccount(ctx, serviceAccount, "metrics-reader-rbac-template.yaml", "controller_name", controllerName)
+}
+
+func httpGet(url string, token string) (*http.Response, error) {
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // we don't care about the certificate
+	}
+	client := &http.Client{Transport: tr}
+ + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+token) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + return resp, nil +} + +func SendMetricsRequest(ctx context.Context, serviceAccount string, endpoint string, controllerName string) error { + sc := scenarioCtx(ctx) + portForwardCmd := exec.Command("kubectl", "port-forward", "-n", olmNamespace, fmt.Sprintf("service/%s-service", controllerName), "8443:metrics") //nolint:gosec // perfectly safe to start port-forwarder for provided controller name + sc.backGroundCmds = append(sc.backGroundCmds, portForwardCmd) + if err := portForwardCmd.Start(); err != nil { + return err + } + token, err := kubectl("create", "token", serviceAccount, "-n", sc.namespace) + if err != nil { + return err + } + waitFor(ctx, func() bool { + resp, err := httpGet(fmt.Sprintf("https://localhost:8443%s", endpoint), token) + if err != nil { + return false + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + b, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + sc.metricsResponse = string(b) + return true + } + return false + }) + + return nil +} + +func CatalogIsUpdatedToVersion(name, version string) error { + ref, err := kubectl("get", "clustercatalog", fmt.Sprintf("%s-catalog", name), "-o", "jsonpath={.spec.source.image.ref}") + if err != nil { + return err + } + i := strings.LastIndexByte(ref, ':') + if i == -1 { + return fmt.Errorf("failed to find tag in image reference %s", ref) + } + base := ref[:i] + patch := map[string]any{ + "spec": map[string]any{ + "source": map[string]any{ + "image": map[string]any{ + "ref": fmt.Sprintf("%s:%s", base, version), + }, + }, + }, + } + pb, err := json.Marshal(patch) + if err != nil { + return err + } + _, err = kubectl("patch", "clustercatalog", fmt.Sprintf("%s-catalog", name), "--type", "merge", "-p", string(pb)) + return err +} + +func CatalogServesBundles(ctx context.Context, catalogName string) error { + yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", fmt.Sprintf("%s-catalog.yaml", catalogName))) + if err != nil { + return fmt.Errorf("failed to read catalog yaml: %v", err) + } + + _, err = kubectlWithInput(string(yamlContent), "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply catalog: %v", err) + } + + return nil +} + +func TagCatalogImage(name, oldTag, newTag string) error { + imageRef := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), fmt.Sprintf("e2e/%s-catalog:%s", name, oldTag)) + return crane.Tag(imageRef, newTag, crane.Insecure) +} + +func PrometheusMetricsAreReturned(ctx context.Context) error { + sc := scenarioCtx(ctx) + if sc.metricsResponse == "" { + return fmt.Errorf("metrics response is empty") + } + parser := expfmt.NewTextParser(model.UTF8Validation) + metricsFamilies, err := parser.TextToMetricFamilies(strings.NewReader(sc.metricsResponse)) + if err != nil { + return fmt.Errorf("failed to parse metrics response: %v", err) + } + if len(metricsFamilies) == 0 { + return fmt.Errorf("metrics response does not contain any metrics") + } + return nil +} + +func OperatorTargetNamespace(ctx context.Context, operator, namespace string) error { + sc := scenarioCtx(ctx) + namespace = substituteScenarioVars(namespace, sc) + raw, err := kubectl("get", "deployment", "-n", sc.namespace, operator, "-o", "json") + if err != nil { + return err + } + d := &appsv1.Deployment{} + if err := json.Unmarshal([]byte(raw), d); err != nil { + 
return err + } + + if tns := d.Spec.Template.Annotations["olm.targetNamespaces"]; tns != namespace { + return fmt.Errorf("expected target namespace %s, got %s", namespace, tns) + } + return nil +} diff --git a/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml new file mode 100644 index 0000000000..f37aed62da --- /dev/null +++ b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {namespace} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {serviceaccount_name} + namespace: {namespace} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {namespace}-{serviceaccount_name}-cluster-admin-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: {serviceaccount_name} + namespace: {namespace} diff --git a/test/e2e/steps/testdata/extra-catalog.yaml b/test/e2e/steps/testdata/extra-catalog.yaml new file mode 100644 index 0000000000..a918c08a56 --- /dev/null +++ b/test/e2e/steps/testdata/extra-catalog.yaml @@ -0,0 +1,11 @@ +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: extra-catalog +spec: + priority: 0 + source: + type: Image + image: + pollIntervalMinutes: 1 + ref: docker-registry.operator-controller-e2e.svc.cluster.local:5000/e2e/test-catalog:v1 diff --git a/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml new file mode 100644 index 0000000000..d3ba7355b9 --- /dev/null +++ b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {namespace} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {serviceaccount_name} + namespace: {namespace} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {controller_name}-metrics-reader-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {controller_name}-metrics-reader +subjects: + - kind: ServiceAccount + name: {serviceaccount_name} + namespace: {namespace} diff --git a/test/e2e/steps/testdata/rbac-template.yaml b/test/e2e/steps/testdata/rbac-template.yaml new file mode 100644 index 0000000000..8aa2c5e0e1 --- /dev/null +++ b/test/e2e/steps/testdata/rbac-template.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {namespace} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {serviceaccount_name} + namespace: {namespace} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {namespace}-{serviceaccount_name}-olm-admin-clusterrole +rules: + - apiGroups: [olm.operatorframework.io] + resources: [clusterextensions, clusterextensions/finalizers] + resourceNames: ["{clusterextension_name}"] + verbs: [update] + # Allow ClusterExtensionRevisions to set blockOwnerDeletion ownerReferences + - apiGroups: [olm.operatorframework.io] + resources: [clusterextensionrevisions, clusterextensionrevisions/finalizers] + verbs: [update, create, list, watch, get, delete, patch] + + - apiGroups: [apiextensions.k8s.io] + resources: [customresourcedefinitions] + verbs: [update, create, list, watch, get, delete, patch] + - apiGroups: [""] + resources: + - configmaps + - secrets + - services + - serviceaccounts + - events + - namespaces + verbs: [update, create, list, watch, get, delete, 
patch] + - apiGroups: ["apps"] + resources: + - deployments + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: + - clusterroles + - roles + - clusterrolebindings + - rolebindings + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: [create] + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: [create] + + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {namespace}-{serviceaccount_name}-install-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {namespace}-{serviceaccount_name}-olm-admin-clusterrole +subjects: + - kind: ServiceAccount + name: {serviceaccount_name} + namespace: {namespace} diff --git a/test/e2e/steps/testdata/test-catalog.yaml b/test/e2e/steps/testdata/test-catalog.yaml new file mode 100644 index 0000000000..8d47518f83 --- /dev/null +++ b/test/e2e/steps/testdata/test-catalog.yaml @@ -0,0 +1,11 @@ +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: test-catalog +spec: + priority: 0 + source: + type: Image + image: + pollIntervalMinutes: 1 + ref: docker-registry.operator-controller-e2e.svc.cluster.local:5000/e2e/test-catalog:v1 diff --git a/test/e2e/webhook_support_test.go b/test/e2e/webhook_support_test.go deleted file mode 100644 index 1c80c615be..0000000000 --- a/test/e2e/webhook_support_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" - . 
"github.com/operator-framework/operator-controller/test/helpers" -) - -var dynamicClient dynamic.Interface - -func TestWebhookSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, "WebhookProviderCertManager") - t.Log("Test support for bundles with webhooks") - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) - - if dynamicClient == nil { - var err error - dynamicClient, err = dynamic.NewForConfig(cfg) - require.NoError(t, err) - } - - t.Log("By creating install namespace, and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the webhook-operator ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By installing the webhook-operator ClusterExtension") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "webhook-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - 
t.Log("By waiting for webhook-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By waiting for webhook-operator deployment to be available") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "webhook-operator-controller-manager"}, deployment)) - available := false - for _, cond := range deployment.Status.Conditions { - if cond.Type == appsv1.DeploymentAvailable { - available = cond.Status == corev1.ConditionTrue - } - } - require.True(ct, available) - }, pollDuration, pollInterval) - - v1Gvr := schema.GroupVersionResource{ - Group: "webhook.operators.coreos.io", - Version: "v1", - Resource: "webhooktests", - } - v1Client := dynamicClient.Resource(v1Gvr).Namespace(namespace.GetName()) - - t.Log("By eventually seeing that invalid CR creation is rejected by the validating webhook") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - obj := getWebhookOperatorResource("invalid-test-cr", namespace.GetName(), false) - _, err := v1Client.Create(t.Context(), obj, metav1.CreateOptions{}) - require.Error(ct, err) - require.Contains(ct, err.Error(), "Invalid value: false: Spec.Valid must be true") - }, pollDuration, pollInterval) - - var ( - res *unstructured.Unstructured - err error - obj = getWebhookOperatorResource("valid-test-cr", namespace.GetName(), true) - ) - - t.Log("By eventually creating a valid CR") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - res, err = v1Client.Create(t.Context(), obj, metav1.CreateOptions{}) - require.NoError(ct, err) - }, pollDuration, pollInterval) - t.Cleanup(func() { - require.NoError(t, v1Client.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{})) - }) - - require.Equal(t, map[string]interface{}{ - "valid": true, - "mutate": true, - }, res.Object["spec"]) - - t.Log("By checking a valid CR is converted to v2 by the conversion webhook") - v2Gvr := schema.GroupVersionResource{ - Group: "webhook.operators.coreos.io", - Version: "v2", - Resource: "webhooktests", - } - v2Client := dynamicClient.Resource(v2Gvr).Namespace(namespace.GetName()) - - t.Log("By eventually getting the valid CR with a v2 client") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - res, err = v2Client.Get(t.Context(), obj.GetName(), metav1.GetOptions{}) - require.NoError(ct, err) - }, pollDuration, pollInterval) - - t.Log("and verifying that the CR is correctly converted") - require.Equal(t, map[string]interface{}{ - "conversion": map[string]interface{}{ - "valid": true, - "mutate": true, - }, - }, res.Object["spec"]) -} - -func getWebhookOperatorResource(name string, namespace string, valid bool) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "webhook.operators.coreos.io/v1", - "kind": "webhooktests", - "metadata": map[string]interface{}{ 
- "name": name, - "namespace": namespace, - }, - "spec": map[string]interface{}{ - "valid": valid, - }, - }, - } -} From c272a608462f0cc6f63f0e17c6b8a45011909ceb Mon Sep 17 00:00:00 2001 From: Predrag Knezevic Date: Tue, 2 Dec 2025 16:00:01 +0100 Subject: [PATCH 2/2] Address reviewer comments --- test/e2e/features/install.feature | 2 +- test/e2e/features_test.go | 15 ++------------- test/e2e/steps/hooks.go | 27 ++++++++++++++++++++------ test/e2e/steps/steps.go | 32 +++++++++++++++++++------------ 4 files changed, 44 insertions(+), 32 deletions(-) diff --git a/test/e2e/features/install.feature b/test/e2e/features/install.feature index 9aebd87612..f04ec48ab0 100644 --- a/test/e2e/features/install.feature +++ b/test/e2e/features/install.feature @@ -8,7 +8,7 @@ Feature: Install ClusterExtension And "test" catalog serves bundles And Service account "olm-sa" with needed permissions is available in test namespace - Scenario Outline: Install latest available version from the default channel + Scenario Outline: Install latest available version When ClusterExtension is applied """ apiVersion: olm.operatorframework.io/v1 diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go index 7ef8750092..0d27f4d668 100644 --- a/test/e2e/features_test.go +++ b/test/e2e/features_test.go @@ -1,7 +1,6 @@ package e2e import ( - //"context" "fmt" "log" "os" @@ -10,9 +9,6 @@ import ( "github.com/cucumber/godog" "github.com/cucumber/godog/colors" "github.com/spf13/pflag" - ctrl "sigs.k8s.io/controller-runtime" - //ctrllog "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" utils "github.com/operator-framework/operator-controller/internal/shared/util/testutils" "github.com/operator-framework/operator-controller/test/e2e/steps" @@ -25,11 +21,7 @@ var opts = godog.Options{ Concurrency: 1, } -var logOpts = zap.Options{} - func init() { - flagSet := pflag.CommandLine - flagSet.BoolVar(&logOpts.Development, "log.debug", false, "print debug log level") godog.BindCommandLineFlags("godog.", &opts) } @@ -38,9 +30,6 @@ func TestMain(m *testing.M) { pflag.Parse() opts.Paths = pflag.Args() - ctrl.SetLogger(zap.New(zap.UseFlagOptions(&logOpts))) - - //opts.DefaultContext = ctrl.LoggerInto(context.Background(), ctrllog.Log) // run tests sc := godog.TestSuite{ TestSuiteInitializer: InitializeSuite, @@ -53,7 +42,7 @@ func TestMain(m *testing.M) { path := os.Getenv("E2E_SUMMARY_OUTPUT") if path == "" { - fmt.Printf("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation\n") + fmt.Println("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation") } else { if err := utils.PrintSummary(path); err != nil { // Fail the run if alerts are found @@ -72,7 +61,7 @@ func TestMain(m *testing.M) { } func InitializeSuite(tc *godog.TestSuiteContext) { - tc.BeforeSuite(steps.DetectEnabledFeatureGates) + tc.BeforeSuite(steps.BeforeSuite) } func InitializeScenario(sc *godog.ScenarioContext) { diff --git a/test/e2e/steps/hooks.go b/test/e2e/steps/hooks.go index e426aa0982..fafa8f12ed 100644 --- a/test/e2e/steps/hooks.go +++ b/test/e2e/steps/hooks.go @@ -10,8 +10,11 @@ import ( "strconv" "github.com/cucumber/godog" + "github.com/go-logr/logr" + "github.com/spf13/pflag" "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) type resource struct { @@ -35,8 +38,17 @@ const ( scenarioContextKey contextKey = "scenario-context" ) -var featureGates = map[string]bool{ - "WebhookProviderCertManager": true, +var ( + logOpts = 
zap.Options{} + featureGates = map[string]bool{ + "WebhookProviderCertManager": true, + } + logger logr.Logger +) + +func init() { + flagSet := pflag.CommandLine + flagSet.BoolVar(&logOpts.Development, "log.debug", false, "print debug log level") } func RegisterHooks(sc *godog.ScenarioContext) { @@ -46,7 +58,9 @@ func RegisterHooks(sc *godog.ScenarioContext) { sc.After(ScenarioCleanup) } -func DetectEnabledFeatureGates() { +func BeforeSuite() { + logger = zap.New(zap.UseFlagOptions(&logOpts)) + raw, err := kubectl("get", "deployment", "-n", olmNamespace, olmDeploymentName, "-o", "json") if err != nil { return @@ -95,9 +109,10 @@ func scenarioCtx(ctx context.Context) *scenarioContext { func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context.Context, error) { sc := scenarioCtx(ctx) - for _, p := range sc.backGroundCmds { - p.Process.Kill() // nolint: errcheck // we don't care about the error here, we just want to kill the process - p.Process.Wait() // nolint: errcheck // same as above, we just want to wait for the process to exit, and do not want to fail the test if it does not + for _, bgCmd := range sc.backGroundCmds { + if p := bgCmd.Process; p != nil { + _ = p.Kill() + } } if err != nil { return ctx, err diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go index 399234352a..70f29f59f8 100644 --- a/test/e2e/steps/steps.go +++ b/test/e2e/steps/steps.go @@ -22,11 +22,11 @@ import ( "github.com/google/go-containerregistry/pkg/crane" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "github.com/spf13/pflag" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/yaml" ) @@ -37,7 +37,10 @@ const ( tick = 1 * time.Second ) -var kubeconfigPath string +var ( + kubeconfigPath string + k8sCli string +) func RegisterSteps(sc *godog.ScenarioContext) { sc.Step(`^OLM is available$`, OLMisAvailable) @@ -68,23 +71,29 @@ func RegisterSteps(sc *godog.ScenarioContext) { } func init() { - kubeconfigPath = os.Getenv("HOME") + "/.kube/config" + flagSet := pflag.CommandLine + flagSet.StringVar(&k8sCli, "k8s.cli", "kubectl", "Path to k8s cli") + if v, found := os.LookupEnv("KUBECONFIG"); found { + kubeconfigPath = v + } else { + home, err := os.UserHomeDir() + if err != nil { + panic(fmt.Sprintf("cannot determine user home directory: %v", err)) + } + flagSet.StringVar(&kubeconfigPath, "kubeconfig", filepath.Join(home, ".kube", "config"), "Paths to a kubeconfig. Only required if out-of-cluster.") + } } -var ( - logger = log.Log -) - func kubectl(args ...string) (string, error) { - cmd := exec.Command("kubectl", args...) - logger.V(1).Info(strings.Join(cmd.Args, " ")) + cmd := exec.Command(k8sCli, args...) + logger.V(1).Info("Running", "command", strings.Join(cmd.Args, " ")) cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) b, err := cmd.Output() return string(b), err } func kubectlWithInput(yaml string, args ...string) (string, error) { - cmd := exec.Command("kubectl", args...) + cmd := exec.Command(k8sCli, args...) cmd.Stdin = bytes.NewBufferString(yaml) cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) b, err := cmd.Output() @@ -143,7 +152,6 @@ func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog. 
_, err := kubectlWithInput(yamlContent, "apply", "-f", "-") if err == nil { return false - //return fmt.Errorf("expected apply to fail, got: %s", out) } if stdErr := string(func() *exec.ExitError { target := &exec.ExitError{} @@ -151,7 +159,6 @@ func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog. return target }().Stderr); !strings.Contains(stdErr, errMsg) { return false - //return fmt.Errorf("expected error message %s to be in stderr, got: %s", errMsg, stdErr) } return true }) @@ -436,6 +443,7 @@ func SendMetricsRequest(ctx context.Context, serviceAccount string, endpoint str return false } defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { b, err := io.ReadAll(resp.Body) if err != nil {