diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index dbd4508d3..284aa1a3e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,12 +36,35 @@ The workflow defined above implies that the community is always ready for discus
Please keep this workflow in mind as you read through the document.
+## How to Build and Deploy Locally
+
+After creating a fork and cloning the project locally,
+you can follow the steps below to test your changes:
+
+1. Create the cluster:
+
+ ```sh
+ kind create cluster --name operator-controller
+ ```
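+
+ Alternatively, you can use the repository's `kind-cluster` Make target, which uses the project-managed `kind` binary (a sketch; it assumes the default Makefile variables suit your environment and that overriding `KIND_CLUSTER_NAME` on the command line fits your setup):
+
+ ```sh
+ make kind-cluster KIND_CLUSTER_NAME=operator-controller
+ ```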
+
+2. Build your changes:
+
+ ```sh
+ make build docker-build
+ ```
+
+3. Load the image locally and deploy it to the Kind cluster:
+
+ ```sh
+ make kind-load kind-deploy
+ ```
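+
+4. Verify the deployment (a quick sanity check; the namespace below assumes the default `olmv1-system` used by the project's manifests):
+
+ ```sh
+ kubectl get pods -n olmv1-system
+ ```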
+
### Communication Channels
- Email: [operator-framework-olm-dev](mailto:operator-framework-olm-dev@googlegroups.com)
- Slack: [#olm-dev](https://kubernetes.slack.com/archives/C0181L6JYQ2)
- Google Group: [olm-gg](https://groups.google.com/g/operator-framework-olm-dev)
-- Weekly in Person Working Group Meeting: [olm-wg](https://github.com/operator-framework/community#operator-lifecycle-manager-working-group)
+- Weekly in Person Working Group Meeting: [olm-wg](https://github.com/operator-framework/community#operator-lifecycle-manager-working-group)
## How are Milestones Designed?
@@ -68,7 +91,7 @@ As discussed earlier, the operator-controller adheres to a microservice architec
## Submitting Issues
-Unsure where to submit an issue?
+Unsure where to submit an issue?
- [The Operator-Controller project](https://github.com/operator-framework/operator-controller/), which is the top level component allowing users to specify operators they'd like to install.
- [The Catalogd project](https://github.com/operator-framework/catalogd/), which hosts operator content and helps users discover installable content.
diff --git a/Makefile b/Makefile
index 49a707b3c..5e8033f51 100644
--- a/Makefile
+++ b/Makefile
@@ -165,12 +165,16 @@ test-unit: $(SETUP_ENVTEST) #HELP Run the unit tests
$(UNIT_TEST_DIRS) \
-test.gocoverdir=$(ROOT_DIR)/coverage/unit
-image-registry: ## Setup in-cluster image registry
- ./hack/test/image-registry.sh $(E2E_REGISTRY_NAMESPACE) $(E2E_REGISTRY_NAME)
-
-build-push-e2e-catalog: ## Build the testdata catalog used for e2e tests and push it to the image registry
- ./hack/test/build-push-e2e-catalog.sh $(E2E_REGISTRY_NAMESPACE) $(LOCAL_REGISTRY_HOST)/$(E2E_TEST_CATALOG_V1)
- ./hack/test/build-push-e2e-catalog.sh $(E2E_REGISTRY_NAMESPACE) $(LOCAL_REGISTRY_HOST)/$(E2E_TEST_CATALOG_V2)
+.PHONY: image-registry
+E2E_REGISTRY_IMAGE=localhost/e2e-test-registry:devel
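+# The registry and push helper binaries below are built for linux/amd64 because they are
+# packaged into a container image that runs inside the kind cluster.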
+image-registry: export GOOS=linux
+image-registry: export GOARCH=amd64
+image-registry: ## Build the e2e registry image and deploy the in-cluster image registry used for e2e tests
+ go build $(GO_BUILD_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o ./testdata/registry/bin/registry ./testdata/registry/registry.go
+ go build $(GO_BUILD_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o ./testdata/push/bin/push ./testdata/push/push.go
+ $(CONTAINER_RUNTIME) build -f ./testdata/Dockerfile -t $(E2E_REGISTRY_IMAGE) ./testdata
+ $(CONTAINER_RUNTIME) save $(E2E_REGISTRY_IMAGE) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME)
+ ./testdata/build-test-registry.sh $(E2E_REGISTRY_NAMESPACE) $(E2E_REGISTRY_NAME) $(E2E_REGISTRY_IMAGE)
# When running the e2e suite, you can set the ARTIFACT_PATH variable to the absolute path
# of the directory for the operator-controller e2e tests to store the artifacts, which
@@ -181,7 +185,7 @@ build-push-e2e-catalog: ## Build the testdata catalog used for e2e tests and pus
test-e2e: KIND_CLUSTER_NAME := operator-controller-e2e
test-e2e: KUSTOMIZE_BUILD_DIR := config/overlays/e2e
test-e2e: GO_BUILD_FLAGS := -cover
-test-e2e: run image-registry build-push-e2e-catalog registry-load-bundles e2e e2e-coverage kind-clean #HELP Run e2e test suite on local kind cluster
+test-e2e: run image-registry e2e e2e-coverage kind-clean #HELP Run e2e test suite on local kind cluster
.PHONY: extension-developer-e2e
extension-developer-e2e: KUSTOMIZE_BUILD_DIR := config/overlays/cert-manager
@@ -205,7 +209,7 @@ post-upgrade-checks:
test-upgrade-e2e: KIND_CLUSTER_NAME := operator-controller-upgrade-e2e
test-upgrade-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog
test-upgrade-e2e: export TEST_CLUSTER_EXTENSION_NAME := test-package
-test-upgrade-e2e: kind-cluster run-latest-release image-registry build-push-e2e-catalog registry-load-bundles pre-upgrade-setup docker-build kind-load kind-deploy post-upgrade-checks kind-clean #HELP Run upgrade e2e tests on a local kind cluster
+test-upgrade-e2e: kind-cluster run-latest-release image-registry pre-upgrade-setup docker-build kind-load kind-deploy post-upgrade-checks kind-clean #HELP Run upgrade e2e tests on a local kind cluster
.PHONY: e2e-coverage
e2e-coverage:
@@ -231,12 +235,6 @@ kind-cluster: $(KIND) #EXHELP Standup a kind cluster.
kind-clean: $(KIND) #EXHELP Delete the kind cluster.
$(KIND) delete cluster --name $(KIND_CLUSTER_NAME)
-registry-load-bundles: ## Load selected e2e testdata container images created in kind-load-bundles into registry
- testdata/bundles/registry-v1/build-push-e2e-bundle.sh ${E2E_REGISTRY_NAMESPACE} $(LOCAL_REGISTRY_HOST)/bundles/registry-v1/prometheus-operator:v1.0.0 prometheus-operator.v1.0.0 prometheus-operator.v1.0.0
- testdata/bundles/registry-v1/build-push-e2e-bundle.sh ${E2E_REGISTRY_NAMESPACE} $(LOCAL_REGISTRY_HOST)/bundles/registry-v1/prometheus-operator:v1.0.1 prometheus-operator.v1.0.1 prometheus-operator.v1.0.0
- testdata/bundles/registry-v1/build-push-e2e-bundle.sh ${E2E_REGISTRY_NAMESPACE} $(LOCAL_REGISTRY_HOST)/bundles/registry-v1/prometheus-operator:v1.2.0 prometheus-operator.v1.2.0 prometheus-operator.v1.0.0
- testdata/bundles/registry-v1/build-push-e2e-bundle.sh ${E2E_REGISTRY_NAMESPACE} $(LOCAL_REGISTRY_HOST)/bundles/registry-v1/prometheus-operator:v2.0.0 prometheus-operator.v2.0.0 prometheus-operator.v1.0.0
-
#SECTION Build
ifeq ($(origin VERSION), undefined)
diff --git a/PROJECT b/PROJECT
index 50ac542dc..a307347a4 100644
--- a/PROJECT
+++ b/PROJECT
@@ -11,8 +11,8 @@ resources:
domain: operatorframework.io
group: olm
kind: ClusterExtension
- path: github.com/operator-framework/operator-controller/api/v1alpha1
- version: v1alpha1
+ path: github.com/operator-framework/operator-controller/api/v1
+ version: v1
- api:
crdVersion: v1
namespaced: true
@@ -20,6 +20,6 @@ resources:
domain: operatorframework.io
group: olm
kind: Extension
- path: github.com/operator-framework/operator-controller/api/v1alpha1
- version: v1alpha1
+ path: github.com/operator-framework/operator-controller/api/v1
+ version: v1
version: "3"
diff --git a/api/v1alpha1/clusterextension_types.go b/api/v1/clusterextension_types.go
similarity index 53%
rename from api/v1alpha1/clusterextension_types.go
rename to api/v1/clusterextension_types.go
index 78608ba3a..696966c5a 100644
--- a/api/v1alpha1/clusterextension_types.go
+++ b/api/v1/clusterextension_types.go
@@ -14,19 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "github.com/operator-framework/operator-controller/internal/conditionsets"
)
var ClusterExtensionKind = "ClusterExtension"
type (
- UpgradeConstraintPolicy string
- CRDUpgradeSafetyPolicy string
+ UpgradeConstraintPolicy string
+ CRDUpgradeSafetyEnforcement string
)
const (
@@ -45,6 +43,34 @@ const (
// ClusterExtensionSpec defines the desired state of ClusterExtension
type ClusterExtensionSpec struct {
+ // namespace is a reference to a Kubernetes namespace.
+ // This is the namespace in which the provided ServiceAccount must exist.
+ // It also designates the default namespace where namespace-scoped resources
+ // for the extension are applied to the cluster.
+ // Some extensions may contain namespace-scoped resources to be applied in other namespaces.
+ // This namespace must exist.
+ //
+ // namespace is required, immutable, and follows the DNS label standard
+ // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
+ // start and end with an alphanumeric character, and be no longer than 63 characters.
+ //
+ // [RFC 1123]: https://tools.ietf.org/html/rfc1123
+ //
+ // +kubebuilder:validation:MaxLength:=63
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="namespace is immutable"
+ // +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$\")",message="namespace must be a valid DNS1123 label"
+ // +kubebuilder:validation:Required
+ Namespace string `json:"namespace"`
+
+ // serviceAccount is a reference to a ServiceAccount used to perform all interactions
+ // with the cluster that are required to manage the extension.
+ // The ServiceAccount must be configured with the necessary permissions to perform these interactions.
+ // The ServiceAccount must exist in the namespace referenced in the spec.
+ // serviceAccount is required.
+ //
+ // +kubebuilder:validation:Required
+ ServiceAccount ServiceAccountReference `json:"serviceAccount"`
+
// source is a required field which selects the installation source of content
// for this ClusterExtension. Selection is performed by setting the sourceType.
//
@@ -58,111 +84,69 @@ type ClusterExtensionSpec struct {
// catalog:
// packageName: example-package
//
+ // +kubebuilder:validation:Required
Source SourceConfig `json:"source"`
- // install is a required field used to configure the installation options
- // for the ClusterExtension such as the installation namespace,
- // the service account and the pre-flight check configuration.
+ // install is an optional field used to configure the installation options
+ // for the ClusterExtension such as the pre-flight check configuration.
//
- // Below is a minimal example of an installation definition (in yaml):
- // install:
- // namespace: example-namespace
- // serviceAccount:
- // name: example-sa
- Install ClusterExtensionInstallConfig `json:"install"`
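+ // Below is a minimal example of an installation definition (in yaml):
+ //  install:
+ //    preflight:
+ //      crdUpgradeSafety:
+ //        enforcement: None
+ //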
+ // +optional
+ Install *ClusterExtensionInstallConfig `json:"install,omitempty"`
}
const SourceTypeCatalog = "Catalog"
// SourceConfig is a discriminated union which selects the installation source.
+//
// +union
-// +kubebuilder:validation:XValidation:rule="self.sourceType == 'Catalog' && has(self.catalog)",message="sourceType Catalog requires catalog field"
+// +kubebuilder:validation:XValidation:rule="has(self.sourceType) && self.sourceType == 'Catalog' ? has(self.catalog) : !has(self.catalog)",message="catalog is required when sourceType is Catalog, and forbidden otherwise"
type SourceConfig struct {
// sourceType is a required reference to the type of install source.
//
- // Allowed values are ["Catalog"]
+ // Allowed values are "Catalog"
//
- // When this field is set to "Catalog", information for determining the appropriate
- // bundle of content to install will be fetched from ClusterCatalog resources existing
- // on the cluster. When using the Catalog sourceType, the catalog field must also be set.
+ // When this field is set to "Catalog", information for determining the
+ // appropriate bundle of content to install will be fetched from
+ // ClusterCatalog resources existing on the cluster.
+ // When using the Catalog sourceType, the catalog field must also be set.
//
// +unionDiscriminator
// +kubebuilder:validation:Enum:="Catalog"
+ // +kubebuilder:validation:Required
SourceType string `json:"sourceType"`
- // catalog is used to configure how information is sourced from a catalog. This field must be defined when sourceType is set to "Catalog",
- // and must be the only field defined for this sourceType.
+ // catalog is used to configure how information is sourced from a catalog.
+ // This field is required when sourceType is "Catalog", and forbidden otherwise.
//
- // +optional.
+ // +optional
Catalog *CatalogSource `json:"catalog,omitempty"`
}
// ClusterExtensionInstallConfig is a union which selects the clusterExtension installation config.
// ClusterExtensionInstallConfig requires the namespace and serviceAccount which should be used for the installation of packages.
+//
+// +kubebuilder:validation:XValidation:rule="has(self.preflight)",message="at least one of [preflight] are required when install is specified"
// +union
type ClusterExtensionInstallConfig struct {
- // namespace is a reference to the Namespace in which the bundle of
- // content for the package referenced in the packageName field will be applied.
- // The bundle may contain cluster-scoped resources or resources that are
- // applied to other Namespaces. This Namespace is expected to exist.
- //
- // namespace is required, immutable, and follows the DNS label standard
- // as defined in [RFC 1123]. This means that valid values:
- // - Contain no more than 63 characters
- // - Contain only lowercase alphanumeric characters or '-'
- // - Start with an alphanumeric character
- // - End with an alphanumeric character
- //
- // Some examples of valid values are:
- // - some-namespace
- // - 123-namespace
- // - 1-namespace-2
- // - somenamespace
- //
- // Some examples of invalid values are:
- // - -some-namespace
- // - some-namespace-
- // - thisisareallylongnamespacenamethatisgreaterthanthemaximumlength
- // - some.namespace
- //
- // [RFC 1123]: https://tools.ietf.org/html/rfc1123
- //
- //+kubebuilder:validation:Pattern:=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
- //+kubebuilder:validation:MaxLength:=63
- //+kubebuilder:validation:XValidation:rule="self == oldSelf",message="namespace is immutable"
- Namespace string `json:"namespace"`
-
- // serviceAccount is a required reference to a ServiceAccount that exists
- // in the installNamespace. The provided ServiceAccount is used to install and
- // manage the content for the package specified in the packageName field.
- //
- // In order to successfully install and manage the content for the package,
- // the ServiceAccount provided via this field should be configured with the
- // appropriate permissions to perform the necessary operations on all the
- // resources that are included in the bundle of content being applied.
- ServiceAccount ServiceAccountReference `json:"serviceAccount"`
-
- // preflight is an optional field that can be used to configure the preflight checks run before installation or upgrade of the content for the package specified in the packageName field.
- //
- // When specified, it overrides the default configuration of the preflight checks that are required to execute successfully during an install/upgrade operation.
+ // preflight is an optional field that can be used to configure the checks that are
+ // run before installation or upgrade of the content for the package specified in the packageName field.
//
- // When not specified, the default configuration for each preflight check will be used.
+ // When specified, it replaces the default preflight configuration for install/upgrade actions.
+ // When not specified, the default configuration will be used.
//
- //+optional
+ // +optional
Preflight *PreflightConfig `json:"preflight,omitempty"`
}
-// CatalogSource defines the required fields for catalog source.
+// CatalogSource defines the attributes used to identify and filter content from a catalog.
type CatalogSource struct {
// packageName is a reference to the name of the package to be installed
// and is used to filter the content from catalogs.
//
- // This field is required, immutable and follows the DNS subdomain name
- // standard as defined in [RFC 1123]. This means that valid entries:
- // - Contain no more than 253 characters
- // - Contain only lowercase alphanumeric characters, '-', or '.'
- // - Start with an alphanumeric character
- // - End with an alphanumeric character
+ // packageName is required, immutable, and follows the DNS subdomain standard
+ // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ // hyphens (-) or periods (.), start and end with an alphanumeric character,
+ // and be no longer than 253 characters.
//
// Some examples of valid values are:
// - some-package
@@ -178,9 +162,11 @@ type CatalogSource struct {
//
// [RFC 1123]: https://tools.ietf.org/html/rfc1123
//
- //+kubebuilder:validation:MaxLength:=253
- //+kubebuilder:validation:Pattern:=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
- //+kubebuilder:validation:XValidation:rule="self == oldSelf",message="packageName is immutable"
+ // +kubebuilder:validation:MaxLength:=253
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="packageName is immutable"
+ // +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters"
+ // +kubebuilder:validation:Required
PackageName string `json:"packageName"`
// version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
@@ -257,15 +243,20 @@ type CatalogSource struct {
//
// For more information on semver, please see https://semver.org/
//
- //+kubebuilder:validation:MaxLength:=64
- //+kubebuilder:validation:Pattern=`^(\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\^)\s*(v?(0|[1-9]\d*|[x|X|\*])(\.(0|[1-9]\d*|x|X|\*]))?(\.(0|[1-9]\d*|x|X|\*))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)((?:\s+|,\s*|\s*\|\|\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\^)\s*(v?(0|[1-9]\d*|x|X|\*])(\.(0|[1-9]\d*|x|X|\*))?(\.(0|[1-9]\d*|x|X|\*]))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)*$`
- //+optional
+ // +kubebuilder:validation:MaxLength:=64
+ // +kubebuilder:validation:XValidation:rule="self.matches(\"^(\\\\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\\\\^)\\\\s*(v?(0|[1-9]\\\\d*|[x|X|\\\\*])(\\\\.(0|[1-9]\\\\d*|x|X|\\\\*]))?(\\\\.(0|[1-9]\\\\d*|x|X|\\\\*))?(-([0-9A-Za-z\\\\-]+(\\\\.[0-9A-Za-z\\\\-]+)*))?(\\\\+([0-9A-Za-z\\\\-]+(\\\\.[0-9A-Za-z\\\\-]+)*))?)\\\\s*)((?:\\\\s+|,\\\\s*|\\\\s*\\\\|\\\\|\\\\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\\\\^)\\\\s*(v?(0|[1-9]\\\\d*|x|X|\\\\*])(\\\\.(0|[1-9]\\\\d*|x|X|\\\\*))?(\\\\.(0|[1-9]\\\\d*|x|X|\\\\*]))?(-([0-9A-Za-z\\\\-]+(\\\\.[0-9A-Za-z\\\\-]+)*))?(\\\\+([0-9A-Za-z\\\\-]+(\\\\.[0-9A-Za-z\\\\-]+)*))?)\\\\s*)*$\")",message="invalid version expression"
+ // +optional
Version string `json:"version,omitempty"`
// channels is an optional reference to a set of channels belonging to
// the package specified in the packageName field.
//
- // A "channel" is a package author defined stream of updates for an extension.
+ // A "channel" is a package-author-defined stream of updates for an extension.
+ //
+ // Each channel in the list must follow the DNS subdomain standard
+ // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ // hyphens (-) or periods (.), start and end with an alphanumeric character,
+ // and be no longer than 253 characters. No more than 256 channels can be specified.
//
// When specified, it is used to constrain the set of installable bundles and
// the automated upgrade path. This constraint is an AND operation with the
@@ -277,13 +268,6 @@ type CatalogSource struct {
//
// When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
//
- // This field follows the DNS subdomain name standard as defined in [RFC
- // 1123]. This means that valid entries:
- // - Contain no more than 253 characters
- // - Contain only lowercase alphanumeric characters, '-', or '.'
- // - Start with an alphanumeric character
- // - End with an alphanumeric character
- //
// Some examples of valid values are:
// - 1.1.x
// - alpha
@@ -303,9 +287,10 @@ type CatalogSource struct {
//
// [RFC 1123]: https://tools.ietf.org/html/rfc1123
//
- //+kubebuilder:validation:items:MaxLength:=253
- //+kubebuilder:validation:items:Pattern:=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
- //+optional
+ // +kubebuilder:validation:items:MaxLength:=253
+ // +kubebuilder:validation:MaxItems:=256
+ // +kubebuilder:validation:items:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="channels entries must be valid DNS1123 subdomains"
+ // +optional
Channels []string `json:"channels,omitempty"`
// selector is an optional field that can be used
@@ -315,14 +300,14 @@ type CatalogSource struct {
// When unspecified, all ClusterCatalogs will be used in
// the bundle selection process.
//
- //+optional
- Selector metav1.LabelSelector `json:"selector,omitempty"`
+ // +optional
+ Selector *metav1.LabelSelector `json:"selector,omitempty"`
// upgradeConstraintPolicy is an optional field that controls whether
// the upgrade path(s) defined in the catalog are enforced for the package
// referenced in the packageName field.
//
- // Allowed values are: ["CatalogProvided", "SelfCertified"].
+ // Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
//
// When this field is set to "CatalogProvided", automatic upgrades will only occur
// when upgrade constraints specified by the package author are met.
@@ -334,28 +319,26 @@ type CatalogSource struct {
// loss. It is assumed that users have independently verified changes when
// using this option.
//
- // If unspecified, the default value is "CatalogProvided".
+ // When this field is omitted, the default value is "CatalogProvided".
//
- //+kubebuilder:validation:Enum:=CatalogProvided;SelfCertified
- //+kubebuilder:default:=CatalogProvided
- //+optional
+ // +kubebuilder:validation:Enum:=CatalogProvided;SelfCertified
+ // +kubebuilder:default:=CatalogProvided
+ // +optional
UpgradeConstraintPolicy UpgradeConstraintPolicy `json:"upgradeConstraintPolicy,omitempty"`
}
-// ServiceAccountReference references a serviceAccount.
+// ServiceAccountReference identifies the serviceAccount used to install a ClusterExtension.
type ServiceAccountReference struct {
// name is a required, immutable reference to the name of the ServiceAccount
// to be used for installation and management of the content for the package
// specified in the packageName field.
//
- // This ServiceAccount is expected to exist in the installNamespace.
+ // This ServiceAccount must exist in the namespace referenced in the spec.
//
- // This field follows the DNS subdomain name standard as defined in [RFC
- // 1123]. This means that valid values:
- // - Contain no more than 253 characters
- // - Contain only lowercase alphanumeric characters, '-', or '.'
- // - Start with an alphanumeric character
- // - End with an alphanumeric character
+ // name follows the DNS subdomain standard as defined in [RFC 1123].
+ // It must contain only lowercase alphanumeric characters,
+ // hyphens (-) or periods (.), start and end with an alphanumeric character,
+ // and be no longer than 253 characters.
//
// Some examples of valid values are:
// - some-serviceaccount
@@ -370,13 +353,15 @@ type ServiceAccountReference struct {
//
// [RFC 1123]: https://tools.ietf.org/html/rfc1123
//
- //+kubebuilder:validation:MaxLength:=253
- //+kubebuilder:validation:Pattern:=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
- //+kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
+ // +kubebuilder:validation:MaxLength:=253
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
+ // +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="name must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters"
+ // +kubebuilder:validation:Required
Name string `json:"name"`
}
// PreflightConfig holds the configuration for the preflight checks. If used, at least one preflight check must be non-nil.
+//
// +kubebuilder:validation:XValidation:rule="has(self.crdUpgradeSafety)",message="at least one of [crdUpgradeSafety] are required when preflight is specified"
type PreflightConfig struct {
// crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
@@ -384,34 +369,28 @@ type PreflightConfig struct {
//
// The CRD Upgrade Safety pre-flight check safeguards from unintended
// consequences of upgrading a CRD, such as data loss.
- //
- // This field is required if the spec.install.preflight field is specified.
CRDUpgradeSafety *CRDUpgradeSafetyPreflightConfig `json:"crdUpgradeSafety"`
}
// CRDUpgradeSafetyPreflightConfig is the configuration for CRD upgrade safety preflight check.
type CRDUpgradeSafetyPreflightConfig struct {
- // policy is used to configure the state of the CRD Upgrade Safety pre-flight check.
- //
- // This field is required when the spec.install.preflight.crdUpgradeSafety field is
- // specified.
+ // enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
//
- // Allowed values are ["Enabled", "Disabled"]. The default value is "Enabled".
+ // Allowed values are "None" or "Strict". The default value is "Strict".
//
- // When set to "Disabled", the CRD Upgrade Safety pre-flight check will be skipped
+ // When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
// when performing an upgrade operation. This should be used with caution as
// unintended consequences such as data loss can occur.
//
- // When set to "Enabled", the CRD Upgrade Safety pre-flight check will be run when
+ // When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
// performing an upgrade operation.
//
- //+kubebuilder:validation:Enum:="Enabled";"Disabled"
- //+kubebuilder:default:=Enabled
- Policy CRDUpgradeSafetyPolicy `json:"policy"`
+ // +kubebuilder:validation:Enum:="None";"Strict"
+ // +kubebuilder:validation:Required
+ Enforcement CRDUpgradeSafetyEnforcement `json:"enforcement"`
}
const (
- // TODO(user): add more Types, here and into init()
TypeInstalled = "Installed"
TypeProgressing = "Progressing"
@@ -428,110 +407,111 @@ const (
ReasonBlocked = "Blocked"
ReasonRetrying = "Retrying"
- CRDUpgradeSafetyPolicyEnabled CRDUpgradeSafetyPolicy = "Enabled"
- CRDUpgradeSafetyPolicyDisabled CRDUpgradeSafetyPolicy = "Disabled"
+ // None will not perform CRD upgrade safety checks.
+ CRDUpgradeSafetyEnforcementNone CRDUpgradeSafetyEnforcement = "None"
+ // Strict will enforce the CRD upgrade safety check and block the upgrade if the CRD would not pass the check.
+ CRDUpgradeSafetyEnforcementStrict CRDUpgradeSafetyEnforcement = "Strict"
)
-func init() {
- // TODO(user): add Types from above
- conditionsets.ConditionTypes = append(conditionsets.ConditionTypes,
- TypeInstalled,
- TypeDeprecated,
- TypePackageDeprecated,
- TypeChannelDeprecated,
- TypeBundleDeprecated,
- TypeProgressing,
- )
- // TODO(user): add Reasons from above
- conditionsets.ConditionReasons = append(conditionsets.ConditionReasons,
- ReasonSucceeded,
- ReasonDeprecated,
- ReasonFailed,
- ReasonBlocked,
- ReasonRetrying,
- )
-}
-
+// BundleMetadata is a representation of the identifying attributes of a bundle.
type BundleMetadata struct {
- // name is a required field and is a reference
- // to the name of a bundle
+ // name is required and follows the DNS subdomain standard
+ // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ // hyphens (-) or periods (.), start and end with an alphanumeric character,
+ // and be no longer than 253 characters.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="name must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters"
Name string `json:"name"`
- // version is a required field and is a reference
- // to the version that this bundle represents
+
+ // version is a required field and is a reference to the version that this bundle represents.
+ // version follows the semantic versioning standard as defined in https://semver.org/.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule="self.matches(\"^([0-9]+)(\\\\.[0-9]+)?(\\\\.[0-9]+)?(-([-0-9A-Za-z]+(\\\\.[-0-9A-Za-z]+)*))?(\\\\+([-0-9A-Za-z]+(-\\\\.[-0-9A-Za-z]+)*))?\")",message="version must be well-formed semver"
Version string `json:"version"`
}
-// ClusterExtensionStatus defines the observed state of ClusterExtension.
+// ClusterExtensionStatus defines the observed state of a ClusterExtension.
type ClusterExtensionStatus struct {
- Install *ClusterExtensionInstallStatus `json:"install,omitempty"`
-
- // conditions is a representation of the current state for this ClusterExtension.
- // The status is represented by a set of "conditions".
- //
- // Each condition is generally structured in the following format:
- // - Type: a string representation of the condition type. More or less the condition "name".
- // - Status: a string representation of the state of the condition. Can be one of ["True", "False", "Unknown"].
- // - Reason: a string representation of the reason for the current state of the condition. Typically useful for building automation around particular Type+Reason combinations.
- // - Message: a human readable message that further elaborates on the state of the condition
+ // The set of condition types which apply to all spec.source variations are Installed and Progressing.
//
- // The global set of condition types are:
- // - "Installed", represents whether or not the a bundle has been installed for this ClusterExtension
- // - "Progressing", represents whether or not the ClusterExtension is progressing towards a new state
+ // The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
+ // When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ // When Installed is False and the Reason is Failed, the bundle has failed to install.
//
- // When the ClusterExtension is sourced from a catalog, the following conditions are also possible:
- // - "Deprecated", represents an aggregation of the PackageDeprecated, ChannelDeprecated, and BundleDeprecated condition types
- // - "PackageDeprecated", represents whether or not the package specified in the spec.source.catalog.packageName field has been deprecated
- // - "ChannelDeprecated", represents whether or not any channel specified in spec.source.catalog.channels has been deprecated
- // - "BundleDeprecated", represents whether or not the installed bundle is deprecated
- //
- // The current set of reasons are:
- // - "Succeeded", this reason is set on the "Installed" and "Progressing" conditions when initial installation and progressing to a new state is successful
- // - "Failed", this reason is set on the "Installed" condition when an error has occurred while performing the initial installation.
- // - "Blocked", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that requires manual intervention for recovery
- // - "Retrying", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that could be resolved on subsequent reconciliation attempts
- // - "Deprecated", this reason is set on the "Deprecated", "PackageDeprecated", "ChannelDeprecated", and "BundleDeprecated" conditions to signal that the installed package has been deprecated at the particular scope
+ // The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
+ // When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
+ // When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
+ // When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
//
+ // When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ // These are indications from a package owner to guide users away from a particular package, channel, or bundle.
+ // BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ // ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ // PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ // Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
//
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
+ // +optional
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+ // install is a representation of the current installation status for this ClusterExtension.
+ //
+ // +optional
+ Install *ClusterExtensionInstallStatus `json:"install,omitempty"`
}
+// ClusterExtensionInstallStatus is a representation of the status of the identified bundle.
type ClusterExtensionInstallStatus struct {
- // bundle is a representation of the currently installed bundle.
+ // bundle is a required field which represents the identifying attributes of a bundle.
//
// A "bundle" is a versioned set of content that represents the resources that
// need to be applied to a cluster to install a package.
+ //
+ // +kubebuilder:validation:Required
Bundle BundleMetadata `json:"bundle"`
}
-//+kubebuilder:object:root=true
-//+kubebuilder:resource:scope=Cluster
-//+kubebuilder:subresource:status
-//+kubebuilder:printcolumn:name="Installed Bundle",type=string,JSONPath=`.status.install.bundle.name`
-//+kubebuilder:printcolumn:name=Version,type=string,JSONPath=`.status.install.bundle.version`
-//+kubebuilder:printcolumn:name="Installed",type=string,JSONPath=`.status.conditions[?(@.type=='Installed')].status`
-//+kubebuilder:printcolumn:name="Progressing",type=string,JSONPath=`.status.conditions[?(@.type=='Progressing')].status`
-//+kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp`
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Installed Bundle",type=string,JSONPath=`.status.install.bundle.name`
+// +kubebuilder:printcolumn:name=Version,type=string,JSONPath=`.status.install.bundle.version`
+// +kubebuilder:printcolumn:name="Installed",type=string,JSONPath=`.status.conditions[?(@.type=='Installed')].status`
+// +kubebuilder:printcolumn:name="Progressing",type=string,JSONPath=`.status.conditions[?(@.type=='Progressing')].status`
+// +kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp`
// ClusterExtension is the Schema for the clusterextensions API
type ClusterExtension struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
- Spec ClusterExtensionSpec `json:"spec,omitempty"`
+ // spec is an optional field that defines the desired state of the ClusterExtension.
+ // +optional
+ Spec ClusterExtensionSpec `json:"spec,omitempty"`
+
+ // status is an optional field that defines the observed state of the ClusterExtension.
+ // +optional
Status ClusterExtensionStatus `json:"status,omitempty"`
}
-//+kubebuilder:object:root=true
+// +kubebuilder:object:root=true
// ClusterExtensionList contains a list of ClusterExtension
type ClusterExtensionList struct {
metav1.TypeMeta `json:",inline"`
+
+ // +optional
metav1.ListMeta `json:"metadata,omitempty"`
- Items []ClusterExtension `json:"items"`
+
+ // items is a required list of ClusterExtension objects.
+ //
+ // +kubebuilder:validation:Required
+ Items []ClusterExtension `json:"items"`
}
func init() {
diff --git a/api/v1alpha1/clusterextension_types_test.go b/api/v1/clusterextension_types_test.go
similarity index 99%
rename from api/v1alpha1/clusterextension_types_test.go
rename to api/v1/clusterextension_types_test.go
index 0ed4f1a08..297a15b13 100644
--- a/api/v1alpha1/clusterextension_types_test.go
+++ b/api/v1/clusterextension_types_test.go
@@ -1,4 +1,4 @@
-package v1alpha1_test
+package v1_test
import (
"fmt"
diff --git a/api/v1alpha1/groupversion_info.go b/api/v1/groupversion_info.go
similarity index 89%
rename from api/v1alpha1/groupversion_info.go
rename to api/v1/groupversion_info.go
index f46abbf3d..f2e8582ee 100644
--- a/api/v1alpha1/groupversion_info.go
+++ b/api/v1/groupversion_info.go
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha1 contains API Schema definitions for the olm v1alpha1 API group
+// Package v1 contains API Schema definitions for the olm v1 API group
// +kubebuilder:object:generate=true
// +groupName=olm.operatorframework.io
-package v1alpha1
+package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,7 +26,7 @@ import (
var (
// GroupVersion is group version used to register these objects
- GroupVersion = schema.GroupVersion{Group: "olm.operatorframework.io", Version: "v1alpha1"}
+ GroupVersion = schema.GroupVersion{Group: "olm.operatorframework.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
similarity index 95%
rename from api/v1alpha1/zz_generated.deepcopy.go
rename to api/v1/zz_generated.deepcopy.go
index ccd143aec..622bd2b83 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -18,10 +18,10 @@ limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
-package v1alpha1
+package v1
import (
- "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -63,7 +63,11 @@ func (in *CatalogSource) DeepCopyInto(out *CatalogSource) {
*out = make([]string, len(*in))
copy(*out, *in)
}
- in.Selector.DeepCopyInto(&out.Selector)
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource.
@@ -106,7 +110,6 @@ func (in *ClusterExtension) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterExtensionInstallConfig) DeepCopyInto(out *ClusterExtensionInstallConfig) {
*out = *in
- out.ServiceAccount = in.ServiceAccount
if in.Preflight != nil {
in, out := &in.Preflight, &out.Preflight
*out = new(PreflightConfig)
@@ -175,8 +178,13 @@ func (in *ClusterExtensionList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterExtensionSpec) DeepCopyInto(out *ClusterExtensionSpec) {
*out = *in
+ out.ServiceAccount = in.ServiceAccount
in.Source.DeepCopyInto(&out.Source)
- in.Install.DeepCopyInto(&out.Install)
+ if in.Install != nil {
+ in, out := &in.Install, &out.Install
+ *out = new(ClusterExtensionInstallConfig)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterExtensionSpec.
@@ -192,18 +200,18 @@ func (in *ClusterExtensionSpec) DeepCopy() *ClusterExtensionSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterExtensionStatus) DeepCopyInto(out *ClusterExtensionStatus) {
*out = *in
- if in.Install != nil {
- in, out := &in.Install, &out.Install
- *out = new(ClusterExtensionInstallStatus)
- **out = **in
- }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make([]v1.Condition, len(*in))
+ *out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.Install != nil {
+ in, out := &in.Install, &out.Install
+ *out = new(ClusterExtensionInstallStatus)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterExtensionStatus.
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
index c353a4cb0..38d7534f3 100644
--- a/cmd/manager/main.go
+++ b/cmd/manager/main.go
@@ -46,10 +46,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
"github.com/operator-framework/operator-controller/internal/action"
"github.com/operator-framework/operator-controller/internal/applier"
"github.com/operator-framework/operator-controller/internal/authentication"
@@ -141,8 +141,8 @@ func main() {
setupLog.Info("set up manager")
cacheOptions := crcache.Options{
ByObject: map[client.Object]crcache.ByObject{
- &ocv1alpha1.ClusterExtension{}: {Label: k8slabels.Everything()},
- &catalogd.ClusterCatalog{}: {Label: k8slabels.Everything()},
+ &ocv1.ClusterExtension{}: {Label: k8slabels.Everything()},
+ &catalogd.ClusterCatalog{}: {Label: k8slabels.Everything()},
},
DefaultNamespaces: map[string]crcache.Config{
systemNamespace: {LabelSelector: k8slabels.Everything()},
@@ -196,8 +196,8 @@ func main() {
cfgGetter, err := helmclient.NewActionConfigGetter(mgr.GetConfig(), mgr.GetRESTMapper(),
helmclient.StorageDriverMapper(action.ChunkedStorageDriverMapper(coreClient, mgr.GetAPIReader(), systemNamespace)),
helmclient.ClientNamespaceMapper(func(obj client.Object) (string, error) {
- ext := obj.(*ocv1alpha1.ClusterExtension)
- return ext.Spec.Install.Namespace, nil
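+ // The Helm client for each ClusterExtension is scoped to the namespace referenced
+ // by spec.namespace (previously spec.install.namespace).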
+ ext := obj.(*ocv1.ClusterExtension)
+ return ext.Spec.Namespace, nil
}),
helmclient.ClientRestConfigMapper(clientRestConfigMapper),
)
@@ -291,7 +291,7 @@ func main() {
cm := contentmanager.NewManager(clientRestConfigMapper, mgr.GetConfig(), mgr.GetRESTMapper())
err = clusterExtensionFinalizers.Register(controllers.ClusterExtensionCleanupContentManagerCacheFinalizer, finalizers.FinalizerFunc(func(ctx context.Context, obj client.Object) (crfinalizer.Result, error) {
- ext := obj.(*ocv1alpha1.ClusterExtension)
+ ext := obj.(*ocv1.ClusterExtension)
err := cm.Delete(ext)
return crfinalizer.Result{}, err
}))
diff --git a/codecov.yml b/codecov.yml
index 4dde336da..a3bfabd61 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -1,3 +1,11 @@
codecov:
notify:
after_n_builds: 2
+
+# Configure the paths to include in coverage reports.
+# Exclude documentation, YAML configurations, and test files.
+coverage:
+ paths:
+ - "api/"
+ - "cmd/"
+ - "internal/"
\ No newline at end of file
diff --git a/commitchecker.yaml b/commitchecker.yaml
index 784607239..906d2be07 100644
--- a/commitchecker.yaml
+++ b/commitchecker.yaml
@@ -1,4 +1,4 @@
-expectedMergeBase: 6bda277736597142d5863e9884f872b2ffd77b63
+expectedMergeBase: 6d79092c8921f888b125e084b494576e87d5706e
upstreamBranch: main
upstreamOrg: operator-framework
upstreamRepo: operator-controller
diff --git a/config/base/crd/bases/olm.operatorframework.io_clusterextensions.yaml b/config/base/crd/bases/olm.operatorframework.io_clusterextensions.yaml
index 255010147..a908b256d 100644
--- a/config/base/crd/bases/olm.operatorframework.io_clusterextensions.yaml
+++ b/config/base/crd/bases/olm.operatorframework.io_clusterextensions.yaml
@@ -30,7 +30,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- name: v1alpha1
+ name: v1
schema:
openAPIV3Schema:
description: ClusterExtension is the Schema for the clusterextensions API
@@ -53,60 +53,21 @@ spec:
metadata:
type: object
spec:
- description: ClusterExtensionSpec defines the desired state of ClusterExtension
+ description: spec is an optional field that defines the desired state
+ of the ClusterExtension.
properties:
install:
description: |-
- install is a required field used to configure the installation options
- for the ClusterExtension such as the installation namespace,
- the service account and the pre-flight check configuration.
-
- Below is a minimal example of an installation definition (in yaml):
- install:
- namespace: example-namespace
- serviceAccount:
- name: example-sa
+ install is an optional field used to configure the installation options
+ for the ClusterExtension such as the pre-flight check configuration.
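+
+ Below is a minimal example of an installation definition (in yaml):
+  install:
+    preflight:
+      crdUpgradeSafety:
+        enforcement: None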
properties:
- namespace:
- description: |-
- namespace is a reference to the Namespace in which the bundle of
- content for the package referenced in the packageName field will be applied.
- The bundle may contain cluster-scoped resources or resources that are
- applied to other Namespaces. This Namespace is expected to exist.
-
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. This means that valid values:
- - Contain no more than 63 characters
- - Contain only lowercase alphanumeric characters or '-'
- - Start with an alphanumeric character
- - End with an alphanumeric character
-
- Some examples of valid values are:
- - some-namespace
- - 123-namespace
- - 1-namespace-2
- - somenamespace
-
- Some examples of invalid values are:
- - -some-namespace
- - some-namespace-
- - thisisareallylongnamespacenamethatisgreaterthanthemaximumlength
- - some.namespace
-
- [RFC 1123]: https://tools.ietf.org/html/rfc1123
- maxLength: 63
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
- type: string
- x-kubernetes-validations:
- - message: namespace is immutable
- rule: self == oldSelf
preflight:
description: |-
- preflight is an optional field that can be used to configure the preflight checks run before installation or upgrade of the content for the package specified in the packageName field.
-
- When specified, it overrides the default configuration of the preflight checks that are required to execute successfully during an install/upgrade operation.
+ preflight is an optional field that can be used to configure the checks that are
+ run before installation or upgrade of the content for the package specified in the packageName field.
- When not specified, the default configuration for each preflight check will be used.
+ When specified, it replaces the default preflight configuration for install/upgrade actions.
+ When not specified, the default configuration will be used.
properties:
crdUpgradeSafety:
description: |-
@@ -115,31 +76,25 @@ spec:
The CRD Upgrade Safety pre-flight check safeguards from unintended
consequences of upgrading a CRD, such as data loss.
-
- This field is required if the spec.install.preflight field is specified.
properties:
- policy:
- default: Enabled
+ enforcement:
description: |-
- policy is used to configure the state of the CRD Upgrade Safety pre-flight check.
-
- This field is required when the spec.install.preflight.crdUpgradeSafety field is
- specified.
+ enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
- Allowed values are ["Enabled", "Disabled"]. The default value is "Enabled".
+ Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "Disabled", the CRD Upgrade Safety pre-flight check will be skipped
+ When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
when performing an upgrade operation. This should be used with caution as
unintended consequences such as data loss can occur.
- When set to "Enabled", the CRD Upgrade Safety pre-flight check will be run when
+ When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
performing an upgrade operation.
enum:
- - Enabled
- - Disabled
+ - None
+ - Strict
type: string
required:
- - policy
+ - enforcement
type: object
required:
- crdUpgradeSafety
@@ -148,56 +103,77 @@ spec:
- message: at least one of [crdUpgradeSafety] are required when
preflight is specified
rule: has(self.crdUpgradeSafety)
- serviceAccount:
+ type: object
+ x-kubernetes-validations:
+ - message: at least one of [preflight] are required when install is
+ specified
+ rule: has(self.preflight)
+ namespace:
+ description: |-
+ namespace is a reference to a Kubernetes namespace.
+ This is the namespace in which the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources
+ for the extension are applied to the cluster.
+ Some extensions may contain namespace-scoped resources to be applied in other namespaces.
+ This namespace must exist.
+
+ namespace is required, immutable, and follows the DNS label standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
+ start and end with an alphanumeric character, and be no longer than 63 characters.
+
+ [RFC 1123]: https://tools.ietf.org/html/rfc1123
+ maxLength: 63
+ type: string
+ x-kubernetes-validations:
+ - message: namespace is immutable
+ rule: self == oldSelf
+ - message: namespace must be a valid DNS1123 label
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
+ serviceAccount:
+ description: |-
+ serviceAccount is a reference to a ServiceAccount used to perform all interactions
+ with the cluster that are required to manage the extension.
+ The ServiceAccount must be configured with the necessary permissions to perform these interactions.
+ The ServiceAccount must exist in the namespace referenced in the spec.
+ serviceAccount is required.
+ properties:
+ name:
description: |-
- serviceAccount is a required reference to a ServiceAccount that exists
- in the installNamespace. The provided ServiceAccount is used to install and
- manage the content for the package specified in the packageName field.
-
- In order to successfully install and manage the content for the package,
- the ServiceAccount provided via this field should be configured with the
- appropriate permissions to perform the necessary operations on all the
- resources that are included in the bundle of content being applied.
- properties:
- name:
- description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount
+ to be used for installation and management of the content for the package
+ specified in the packageName field.
- This ServiceAccount is expected to exist in the installNamespace.
+ This ServiceAccount must exist in the namespace referenced in the spec.
- This field follows the DNS subdomain name standard as defined in [RFC
- 1123]. This means that valid values:
- - Contain no more than 253 characters
- - Contain only lowercase alphanumeric characters, '-', or '.'
- - Start with an alphanumeric character
- - End with an alphanumeric character
+ name follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters.
- Some examples of valid values are:
- - some-serviceaccount
- - 123-serviceaccount
- - 1-serviceaccount-2
- - someserviceaccount
- - some.serviceaccount
+ Some examples of valid values are:
+ - some-serviceaccount
+ - 123-serviceaccount
+ - 1-serviceaccount-2
+ - someserviceaccount
+ - some.serviceaccount
- Some examples of invalid values are:
- - -some-serviceaccount
- - some-serviceaccount-
+ Some examples of invalid values are:
+ - -some-serviceaccount
+ - some-serviceaccount-
- [RFC 1123]: https://tools.ietf.org/html/rfc1123
- maxLength: 253
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
- type: string
- x-kubernetes-validations:
- - message: name is immutable
- rule: self == oldSelf
- required:
- - name
- type: object
+ [RFC 1123]: https://tools.ietf.org/html/rfc1123
+ maxLength: 253
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ - message: name must be a valid DNS1123 subdomain. It must contain
+ only lowercase alphanumeric characters, hyphens (-) or periods
+ (.), start and end with an alphanumeric character, and be
+ no longer than 253 characters
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
required:
- - namespace
- - serviceAccount
+ - name
type: object
source:
description: |-
@@ -216,15 +192,20 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog. This field must be defined when sourceType is set to "Catalog",
- and must be the only field defined for this sourceType.
+ catalog is used to configure how information is sourced from a catalog.
+ This field is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
channels is an optional reference to a set of channels belonging to
the package specified in the packageName field.
- A "channel" is a package author defined stream of updates for an extension.
+ A "channel" is a package-author-defined stream of updates for an extension.
+
+ Each channel in the list must follow the DNS subdomain standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters. No more than 256 channels can be specified.
When specified, it is used to constrain the set of installable bundles and
the automated upgrade path. This constraint is an AND operation with the
@@ -236,13 +217,6 @@ spec:
When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
- This field follows the DNS subdomain name standard as defined in [RFC
- 1123]. This means that valid entries:
- - Contain no more than 253 characters
- - Contain only lowercase alphanumeric characters, '-', or '.'
- - Start with an alphanumeric character
- - End with an alphanumeric character
-
Some examples of valid values are:
- 1.1.x
- alpha
@@ -263,20 +237,21 @@ spec:
[RFC 1123]: https://tools.ietf.org/html/rfc1123
items:
maxLength: 253
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
+ x-kubernetes-validations:
+ - message: channels entries must be valid DNS1123 subdomains
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
+ maxItems: 256
type: array
packageName:
description: |-
packageName is a reference to the name of the package to be installed
and is used to filter the content from catalogs.
- This field is required, immutable and follows the DNS subdomain name
- standard as defined in [RFC 1123]. This means that valid entries:
- - Contain no more than 253 characters
- - Contain only lowercase alphanumeric characters, '-', or '.'
- - Start with an alphanumeric character
- - End with an alphanumeric character
+ packageName is required, immutable, and follows the DNS subdomain standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -292,11 +267,15 @@ spec:
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 253
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
x-kubernetes-validations:
- message: packageName is immutable
rule: self == oldSelf
+ - message: packageName must be a valid DNS1123 subdomain.
+ It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric
+ character, and be no longer than 253 characters
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
selector is an optional field that can be used
@@ -356,7 +335,7 @@ spec:
the upgrade path(s) defined in the catalog are enforced for the package
referenced in the packageName field.
- Allowed values are: ["CatalogProvided", "SelfCertified"].
+ Allowed values are "CatalogProvided" and "SelfCertified"; the field may also be omitted.
When this field is set to "CatalogProvided", automatic upgrades will only occur
when upgrade constraints specified by the package author are met.
@@ -368,7 +347,7 @@ spec:
loss. It is assumed that users have independently verified changes when
using this option.
- If unspecified, the default value is "CatalogProvided".
+ When this field is omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
@@ -449,8 +428,10 @@ spec:
For more information on semver, please see https://semver.org/
maxLength: 64
- pattern: ^(\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\^)\s*(v?(0|[1-9]\d*|[x|X|\*])(\.(0|[1-9]\d*|x|X|\*]))?(\.(0|[1-9]\d*|x|X|\*))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)((?:\s+|,\s*|\s*\|\|\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\^)\s*(v?(0|[1-9]\d*|x|X|\*])(\.(0|[1-9]\d*|x|X|\*))?(\.(0|[1-9]\d*|x|X|\*]))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)*$
type: string
+ x-kubernetes-validations:
+ - message: invalid version expression
+ rule: self.matches("^(\\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\\^)\\s*(v?(0|[1-9]\\d*|[x|X|\\*])(\\.(0|[1-9]\\d*|x|X|\\*]))?(\\.(0|[1-9]\\d*|x|X|\\*))?(-([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?(\\+([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?)\\s*)((?:\\s+|,\\s*|\\s*\\|\\|\\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\\^)\\s*(v?(0|[1-9]\\d*|x|X|\\*])(\\.(0|[1-9]\\d*|x|X|\\*))?(\\.(0|[1-9]\\d*|x|X|\\*]))?(-([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?(\\+([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?)\\s*)*$")
required:
- packageName
type: object
@@ -458,11 +439,12 @@ spec:
description: |-
sourceType is a required reference to the type of install source.
- Allowed values are ["Catalog"]
+ The only allowed value is "Catalog".
- When this field is set to "Catalog", information for determining the appropriate
- bundle of content to install will be fetched from ClusterCatalog resources existing
- on the cluster. When using the Catalog sourceType, the catalog field must also be set.
+ When this field is set to "Catalog", information for determining the
+ appropriate bundle of content to install will be fetched from
+ ClusterCatalog resources existing on the cluster.
+ When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
type: string
@@ -470,42 +452,38 @@ spec:
- sourceType
type: object
x-kubernetes-validations:
- - message: sourceType Catalog requires catalog field
- rule: self.sourceType == 'Catalog' && has(self.catalog)
+ - message: catalog is required when sourceType is Catalog, and forbidden
+ otherwise
+ rule: 'has(self.sourceType) && self.sourceType == ''Catalog'' ?
+ has(self.catalog) : !has(self.catalog)'
required:
- - install
+ - namespace
+ - serviceAccount
- source
type: object
status:
- description: ClusterExtensionStatus defines the observed state of ClusterExtension.
+ description: status is an optional field that defines the observed state
+ of the ClusterExtension.
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterExtension.
- The status is represented by a set of "conditions".
-
- Each condition is generally structured in the following format:
- - Type: a string representation of the condition type. More or less the condition "name".
- - Status: a string representation of the state of the condition. Can be one of ["True", "False", "Unknown"].
- - Reason: a string representation of the reason for the current state of the condition. Typically useful for building automation around particular Type+Reason combinations.
- - Message: a human readable message that further elaborates on the state of the condition
-
- The global set of condition types are:
- - "Installed", represents whether or not the a bundle has been installed for this ClusterExtension
- - "Progressing", represents whether or not the ClusterExtension is progressing towards a new state
-
- When the ClusterExtension is sourced from a catalog, the following conditions are also possible:
- - "Deprecated", represents an aggregation of the PackageDeprecated, ChannelDeprecated, and BundleDeprecated condition types
- - "PackageDeprecated", represents whether or not the package specified in the spec.source.catalog.packageName field has been deprecated
- - "ChannelDeprecated", represents whether or not any channel specified in spec.source.catalog.channels has been deprecated
- - "BundleDeprecated", represents whether or not the installed bundle is deprecated
-
- The current set of reasons are:
- - "Succeeded", this reason is set on the "Installed" and "Progressing" conditions when initial installation and progressing to a new state is successful
- - "Failed", this reason is set on the "Installed" condition when an error has occurred while performing the initial installation.
- - "Blocked", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that requires manual intervention for recovery
- - "Retrying", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that could be resolved on subsequent reconciliation attempts
- - "Deprecated", this reason is set on the "Deprecated", "PackageDeprecated", "ChannelDeprecated", and "BundleDeprecated" conditions to signal that the installed package has been deprecated at the particular scope
+ The condition types that apply to all spec.source variations are Installed and Progressing.
+
+ The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
+ When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ When Installed is False and the Reason is Failed, the bundle has failed to install.
+
+ The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
+ When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
+ When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
+ When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
+
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle.
+ BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
@@ -565,24 +543,37 @@ spec:
- type
x-kubernetes-list-type: map
install:
+ description: install is a representation of the current installation
+ status for this ClusterExtension.
properties:
bundle:
description: |-
- bundle is a representation of the currently installed bundle.
+ bundle is a required field which represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that
need to be applied to a cluster to install a package.
properties:
name:
description: |-
- name is a required field and is a reference
- to the name of a bundle
+ name is required and follows the DNS subdomain standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters.
type: string
+ x-kubernetes-validations:
+ - message: name must be a valid DNS1123 subdomain.
+ It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric
+ character, and be no longer than 253 characters
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference
- to the version that this bundle represents
+ version is a required field and is a reference to the version that this bundle represents.
+ version follows the semantic versioning standard as defined in https://semver.org/.
type: string
+ x-kubernetes-validations:
+ - message: version must be well-formed semver
+ rule: self.matches("^([0-9]+)(\\.[0-9]+)?(\\.[0-9]+)?(-([-0-9A-Za-z]+(\\.[-0-9A-Za-z]+)*))?(\\+([-0-9A-Za-z]+(-\\.[-0-9A-Za-z]+)*))?")
required:
- name
- version
diff --git a/config/samples/catalogd_operatorcatalog.yaml b/config/samples/catalogd_operatorcatalog.yaml
index 48f1da573..4ce96d5b3 100644
--- a/config/samples/catalogd_operatorcatalog.yaml
+++ b/config/samples/catalogd_operatorcatalog.yaml
@@ -1,4 +1,4 @@
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
name: operatorhubio
@@ -7,4 +7,4 @@ spec:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest
- pollInterval: 10m
+ pollIntervalMinutes: 10
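The sample now expresses the poll interval as an integer number of minutes instead of a Duration string. Note that, per the ImageSource description later in this patch, `pollIntervalMinutes` cannot be combined with a digest-based `ref`. A minimal sketch of a digest-pinned catalog follows; the catalog name is a hypothetical placeholder, and the digest is the example digest used elsewhere in this patch:

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
  name: operatorhubio-pinned       # hypothetical name, for illustration only
spec:
  source:
    type: Image
    image:
      ref: quay.io/operatorhubio/catalog@sha256:c7392b4be033da629f9d665fec30f6901de51ce3adebeff0af579f311ee5cf1b
      # pollIntervalMinutes is omitted: polling a digest-based reference is rejected.
```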
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index bd1783176..2a64d96c2 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -1,5 +1,4 @@
## Append samples of your project ##
resources:
-- olm_v1alpha1_clusterextension.yaml
-- olm_v1alpha1_extension.yaml
+- olm_v1_clusterextension.yaml
#+kubebuilder:scaffold:manifestskustomizesamples
diff --git a/config/samples/olm_v1alpha1_clusterextension.yaml b/config/samples/olm_v1_clusterextension.yaml
similarity index 98%
rename from config/samples/olm_v1alpha1_clusterextension.yaml
rename to config/samples/olm_v1_clusterextension.yaml
index 7536c3d90..80aa801ae 100644
--- a/config/samples/olm_v1alpha1_clusterextension.yaml
+++ b/config/samples/olm_v1_clusterextension.yaml
@@ -267,17 +267,16 @@ subjects:
name: argocd-installer
namespace: argocd
---
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
spec:
+ namespace: argocd
+ serviceAccount:
+ name: argocd-installer
source:
sourceType: Catalog
catalog:
packageName: argocd-operator
version: 0.6.0
- install:
- namespace: argocd
- serviceAccount:
- name: argocd-installer
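Once the sample above installs successfully, the CRD hunks earlier in this patch describe the status that gets reported: the Installed and Progressing conditions plus `status.install.bundle`. A rough sketch of that shape is below; only the field layout is taken from this patch, and the bundle name is a hypothetical placeholder:

```yaml
# Hypothetical status for the argocd sample after a successful install.
status:
  conditions:
    - type: Progressing
      status: "True"
      reason: Succeeded
    - type: Installed
      status: "True"
      reason: Succeeded
  install:
    bundle:
      name: argocd-operator.v0.6.0   # placeholder bundle name (DNS1123 subdomain)
      version: 0.6.0                 # semver, matching the version pinned in the sample
```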
diff --git a/docs/api-reference/catalogd-api-reference.md b/docs/api-reference/catalogd-api-reference.md
index 90ec13904..b313d2646 100644
--- a/docs/api-reference/catalogd-api-reference.md
+++ b/docs/api-reference/catalogd-api-reference.md
@@ -1,26 +1,35 @@
# API Reference
## Packages
-- [olm.operatorframework.io/core](#olmoperatorframeworkiocore)
-- [olm.operatorframework.io/v1alpha1](#olmoperatorframeworkiov1alpha1)
+- [olm.operatorframework.io/v1](#olmoperatorframeworkiov1)
-## olm.operatorframework.io/core
+## olm.operatorframework.io/v1
-Package api is the internal version of the API.
+Package v1 contains API Schema definitions for the core v1 API group
+
+### Resource Types
+- [ClusterCatalog](#clustercatalog)
+- [ClusterCatalogList](#clustercataloglist)
+#### AvailabilityMode
-## olm.operatorframework.io/v1alpha1
+_Underlying type:_ _string_
-Package v1alpha1 contains API Schema definitions for the core v1alpha1 API group
+AvailabilityMode defines the availability of the catalog
-### Resource Types
-- [ClusterCatalog](#clustercatalog)
-- [ClusterCatalogList](#clustercataloglist)
+_Appears in:_
+- [ClusterCatalogSpec](#clustercatalogspec)
+
+| Field | Description |
+| --- | --- |
+| `Available` | |
+| `Unavailable` | |
+
#### CatalogSource
@@ -36,8 +45,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `type` _[SourceType](#sourcetype)_ | type is a required reference to the type of source the catalog is sourced from.
Allowed values are ["Image"]
When this field is set to "Image", the ClusterCatalog content will be sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
|
-| `image` _[ImageSource](#imagesource)_ | image is used to configure how catalog contents are sourced from an OCI image. This field must be set when type is set to "Image" and must be the only field defined for this type. | | |
+| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
type is required.
The only allowed value is "Image".
When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
|
+| `image` _[ImageSource](#imagesource)_ | image is used to configure how catalog contents are sourced from an OCI image.
This field is required when type is Image, and forbidden otherwise. | | |
#### ClusterCatalog
@@ -54,13 +63,13 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `apiVersion` _string_ | `olm.operatorframework.io/v1alpha1` | | |
+| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterCatalog` | | |
| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
-| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | | | |
-| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | | | |
+| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is the desired state of the ClusterCatalog.
spec is required.
The controller will work to ensure that the desired
catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\}
|
+| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains information about the state of the ClusterCatalog such as:
- Whether or not the catalog contents are being served via the catalog content HTTP server
- Whether or not the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | |
#### ClusterCatalogList
@@ -75,12 +84,12 @@ ClusterCatalogList contains a list of ClusterCatalog
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `apiVersion` _string_ | `olm.operatorframework.io/v1alpha1` | | |
+| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterCatalogList` | | |
| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
-| `items` _[ClusterCatalog](#clustercatalog) array_ | | | |
+| `items` _[ClusterCatalog](#clustercatalog) array_ | items is a list of ClusterCatalogs.
items is required. | | Required: \{\}
|
#### ClusterCatalogSpec
@@ -96,9 +105,9 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `source` _[CatalogSource](#catalogsource)_ | source is a required field that allows the user to define the source of a Catalog that contains catalog metadata in the File-Based Catalog (FBC) format.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs | | |
-| `priority` _integer_ | priority is an optional field that allows the user to define a priority for a ClusterCatalog.
A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
For example, in the case where multiple ClusterCatalogs provide the same bundle.
A higher number means higher priority. Negative numbers are also accepted.
When omitted, the default priority is 0. | 0 | |
-| `availability` _string_ | Availability is an optional field that allows users to define whether the ClusterCatalog is utilized by the operator-controller.
Allowed values are : ["Enabled", "Disabled"].
If set to "Enabled", the catalog will be used for updates, serving contents, and package installations.
If set to "Disabled", catalogd will stop serving the catalog and the cached data will be removed.
If unspecified, the default value is "Enabled" | Enabled | Enum: [Disabled Enabled]
|
+| `source` _[CatalogSource](#catalogsource)_ | source allows a user to define the source of a catalog.
A "catalog" contains information on content that can be installed on a cluster.
Providing a catalog source makes the contents of the catalog discoverable and usable by
other on-cluster components.
These on-cluster components may do a variety of things with this information, such as
presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest | | Required: \{\}
|
+| `priority` _integer_ | priority allows the user to define a priority for a ClusterCatalog.
priority is optional.
A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
A higher number means higher priority.
It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
When omitted, the default priority is 0 because that is the zero value of integers.
Negative numbers can be used to specify a priority lower than the default.
Positive numbers can be used to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | |
+| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
availabilityMode is optional.
Allowed values are "Available" and "Unavailable"; the field may also be omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
and its contents as usable.
When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
to delete the ClusterCatalog altogether, but would still like it to be treated as if it doesn't exist. | Available | Enum: [Unavailable Available]
|
#### ClusterCatalogStatus
@@ -114,10 +123,10 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions is a representation of the current state for this ClusterCatalog.
The status is represented by a set of "conditions".
Each condition is generally structured in the following format:
- Type: a string representation of the condition type. More or less the condition "name".
- Status: a string representation of the state of the condition. Can be one of ["True", "False", "Unknown"].
- Reason: a string representation of the reason for the current state of the condition. Typically useful for building automation around particular Type+Reason combinations.
- Message: a human-readable message that further elaborates on the state of the condition.
The current set of condition types are:
- "Serving", which represents whether or not the contents of the catalog are being served via the HTTP(S) web server.
- "Progressing", which represents whether or not the ClusterCatalog is progressing towards a new state.
The current set of reasons are:
- "Succeeded", this reason is set on the "Progressing" condition when progressing to a new state is successful.
- "Blocked", this reason is set on the "Progressing" condition when the ClusterCatalog controller has encountered an error that requires manual intervention for recovery.
- "Retrying", this reason is set on the "Progressing" condition when the ClusterCatalog controller has encountered an error that might be resolvable on subsequent reconciliation attempts.
- "Available", this reason is set on the "Serving" condition when the contents of the ClusterCatalog are being served via an endpoint on the HTTP(S) web server.
- "Unavailable", this reason is set on the "Serving" condition when there is not an endpoint on the HTTP(S) web server that is serving the contents of the ClusterCatalog. | | |
-| `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type.
Below is an example of a resolved source for an image source:
resolvedSource:
image:
lastSuccessfulPollAttempt: "2024-09-10T12:22:13Z"
ref: quay.io/operatorhubio/catalog@sha256:c7392b4be033da629f9d665fec30f6901de51ce3adebeff0af579f311ee5cf1b
type: Image | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions is a representation of the current state for this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition is used to represent whether or not the contents of the catalog are being served via the HTTP(S) web server.
When it has a status of True and a reason of Available, the contents of the catalog are being served.
When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
to the contents we identify that there are updates to the contents. | | |
+| `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type. | | |
| `urls` _[ClusterCatalogURLs](#clustercatalogurls)_ | urls contains the URLs that can be used to access the catalog. | | |
-| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the time when the
ClusterCatalog object was last unpacked successfully. | | |
+| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the contents of the
catalog were extracted from their source format. As an example,
when using an Image source, the OCI image will be pulled and the
image layers written to a file-system backed cache. We refer to the
act of this extraction from the source format as "unpacking". | | |
#### ClusterCatalogURLs
@@ -133,7 +142,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `base` _string_ | base is a required cluster-internal URL which provides API access for this ClusterCatalog.
A suffix API access path can be added to retrieve catalog data for the ClusterCatalog.
Currently, a 'v1' API access provides complete FBC retrival via the path "/api/v1/all", with the general form `\{base\}/api/v1/all`. | | Required: \{\}
|
+| `base` _string_ | base is a cluster-internal URL that provides endpoints for
accessing the content of the catalog.
It is expected that clients append the path for the endpoint they wish
to access.
Currently, only a single endpoint is served and is accessible at the path
/api/v1.
The endpoints served for the v1 API are:
- /all - this endpoint returns the entirety of the catalog contents in the FBC format
As the needs of users and clients evolve, new endpoints may be added. | | MaxLength: 525
Required: \{\}
|
#### ImageSource
@@ -143,14 +152,18 @@ _Appears in:_
ImageSource enables users to define the information required for sourcing a Catalog from an OCI image
+If ref is a digest-based image reference and pollIntervalMinutes is specified,
+the resource is rejected, since there is no use in polling a digest-based image reference.
+
+
_Appears in:_
- [CatalogSource](#catalogsource)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `ref` _string_ | ref is a required field that allows the user to define the reference to a container image containing Catalog contents.
Examples:
ref: quay.io/operatorhubio/catalog:latest # image reference
ref: quay.io/operatorhubio/catalog@sha256:c7392b4be033da629f9d665fec30f6901de51ce3adebeff0af579f311ee5cf1b # image reference with sha256 digest | | |
-| `pollInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#duration-v1-meta)_ | pollInterval is an optional field that allows the user to set the interval at which the image source should be polled for new content.
It must be specified as a duration.
It must not be specified for a catalog image referenced by a sha256 digest.
Examples:
pollInterval: 1h # poll the image source every hour
pollInterval: 30m # poll the image source every 30 minutes
pollInterval: 1h30m # poll the image source every 1 hour and 30 minutes
When omitted, the image will not be polled for new content. | | Format: duration
|
+| `ref` _string_ | ref allows users to define the reference to a container image containing Catalog contents.
ref is required.
ref can not be more than 1000 characters.
A reference can be broken down into 3 parts - the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
The port must be the last value in the domain.
Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
The name is typically the repository in the registry where an image is located.
It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
Multiple names can be concatenated with the "/" character.
The domain and name are combined using the "/" character.
Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
For a digest-based reference, the "@" character is the separator.
For a tag-based reference, the ":" character is the separator.
An identifier is required in the reference.
Digest-based references must contain an algorithm reference immediately after the "@" separator.
The algorithm reference must be followed by the ":" character and an encoded string.
The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
The tag must not be longer than 127 characters.
An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000
Required: \{\}
|
+| `pollIntervalMinutes` _integer_ | pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
pollIntervalMinutes is optional.
pollIntervalMinutes can not be specified when ref is a digest-based reference.
When omitted, the image will not be polled for new content. | | Minimum: 1
|
#### ResolvedCatalogSource
@@ -167,8 +180,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
It will be set to one of the following values: ["Image"].
When this field is set to "Image", information about the resolved image source will be set in the 'image' field. | | Enum: [Image]
Required: \{\}
|
-| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image is a field containing resolution information for a catalog sourced from an image. | | |
+| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
type is required.
The only allowed value is "Image".
When set to "Image", information about the resolved image source will be set in the 'image' field. | | Enum: [Image]
Required: \{\}
|
+| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image is a field containing resolution information for a catalog sourced from an image.
This field must be set when type is Image, and forbidden otherwise. | | |
#### ResolvedImageSource
@@ -184,8 +197,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `ref` _string_ | ref contains the resolved sha256 image ref containing Catalog contents. | | |
-| `lastSuccessfulPollAttempt` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastSuccessfulPollAttempt is the time when the resolved source was last successfully polled for new content. | | |
+| `ref` _string_ | ref contains the resolved image digest-based reference.
The digest format is used so users can use other tooling to fetch the exact
OCI manifests that were used to extract the catalog contents. | | MaxLength: 1000
Required: \{\}
|
#### SourceType
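Closing out the catalogd reference changes: the `ClusterCatalogURLs.base` description above now documents a base URL to which clients append endpoint paths (currently only `/api/v1/all`). As a purely illustrative sketch, with an in-cluster Service hostname that is an assumption and not taken from this patch, a served catalog's status might look like:

```yaml
# Hypothetical status.urls shape; the hostname and path are assumptions.
status:
  urls:
    base: https://catalogd-service.olm-v1-system.svc/catalogs/operatorhubio
# Clients append an endpoint path, e.g. {base}/api/v1/all, to retrieve the
# full catalog contents in FBC format.
```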
diff --git a/docs/api-reference/operator-controller-api-reference.md b/docs/api-reference/operator-controller-api-reference.md
index 86bd90190..c3a3862b1 100644
--- a/docs/api-reference/operator-controller-api-reference.md
+++ b/docs/api-reference/operator-controller-api-reference.md
@@ -1,12 +1,12 @@
# API Reference
## Packages
-- [olm.operatorframework.io/v1alpha1](#olmoperatorframeworkiov1alpha1)
+- [olm.operatorframework.io/v1](#olmoperatorframeworkiov1)
-## olm.operatorframework.io/v1alpha1
+## olm.operatorframework.io/v1
-Package v1alpha1 contains API Schema definitions for the olm v1alpha1 API group
+Package v1 contains API Schema definitions for the olm v1 API group
### Resource Types
- [ClusterExtension](#clusterextension)
@@ -18,7 +18,7 @@ Package v1alpha1 contains API Schema definitions for the olm v1alpha1 API group
-
+BundleMetadata is a representation of the identifying attributes of a bundle.
@@ -27,11 +27,11 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `name` _string_ | name is a required field and is a reference
to the name of a bundle | | |
-| `version` _string_ | version is a required field and is a reference
to the version that this bundle represents | | |
+| `name` _string_ | name is required and follows the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters. | | Required: \{\}
|
+| `version` _string_ | version is a required field and is a reference to the version that this bundle represents.
version follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\}
|
-#### CRDUpgradeSafetyPolicy
+#### CRDUpgradeSafetyEnforcement
_Underlying type:_ _string_
@@ -44,8 +44,8 @@ _Appears in:_
| Field | Description |
| --- | --- |
-| `Enabled` | |
-| `Disabled` | |
+| `None` | None will not perform CRD upgrade safety checks.
|
+| `Strict` | Strict will enforce the CRD upgrade safety check and block the upgrade if the CRD would not pass the check.
|
#### CRDUpgradeSafetyPreflightConfig
@@ -61,14 +61,14 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `policy` _[CRDUpgradeSafetyPolicy](#crdupgradesafetypolicy)_ | policy is used to configure the state of the CRD Upgrade Safety pre-flight check.
This field is required when the spec.install.preflight.crdUpgradeSafety field is
specified.
Allowed values are ["Enabled", "Disabled"]. The default value is "Enabled".
When set to "Disabled", the CRD Upgrade Safety pre-flight check will be skipped
when performing an upgrade operation. This should be used with caution as
unintended consequences such as data loss can occur.
When set to "Enabled", the CRD Upgrade Safety pre-flight check will be run when
performing an upgrade operation. | Enabled | Enum: [Enabled Disabled]
|
+| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
when performing an upgrade operation. This should be used with caution as
unintended consequences such as data loss can occur.
When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
performing an upgrade operation. | | Enum: [None Strict]
Required: \{\}
|
#### CatalogSource
-CatalogSource defines the required fields for catalog source.
+CatalogSource defines the attributes used to identify and filter content from a catalog.
@@ -77,11 +77,11 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `packageName` _string_ | packageName is a reference to the name of the package to be installed
and is used to filter the content from catalogs.
This field is required, immutable and follows the DNS subdomain name
standard as defined in [RFC 1123]. This means that valid entries:
- Contain no more than 253 characters
- Contain only lowercase alphanumeric characters, '-', or '.'
- Start with an alphanumeric character
- End with an alphanumeric character
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Pattern: `^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
|
-| `version` _string_ | version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or
more comparison operators, known as comparison strings. Additional
comparison strings can be added using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, // minor versions define the stability level. Some examples:
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
Pattern: `^(\s*(=\|\|!=\|>\|<\|>=\|=>\|<=\|=<\|~\|~>\|\^)\s*(v?(0\|[1-9]\d*\|[x\|X\|\*])(\.(0\|[1-9]\d*\|x\|X\|\*]))?(\.(0\|[1-9]\d*\|x\|X\|\*))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)((?:\s+\|,\s*\|\s*\\|\\|\s*)(=\|\|!=\|>\|<\|>=\|=>\|<=\|=<\|~\|~>\|\^)\s*(v?(0\|[1-9]\d*\|x\|X\|\*])(\.(0\|[1-9]\d*\|x\|X\|\*))?(\.(0\|[1-9]\d*\|x\|X\|\*]))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)*$`
|
-| `channels` _string array_ | channels is an optional reference to a set of channels belonging to
the package specified in the packageName field.
A "channel" is a package author defined stream of updates for an extension.
When specified, it is used to constrain the set of installable bundles and
the automated upgrade path. This constraint is an AND operation with the
version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- Automatic upgrades will be constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
This field follows the DNS subdomain name standard as defined in [RFC
1123]. This means that valid entries:
- Contain no more than 253 characters
- Contain only lowercase alphanumeric characters, '-', or '.'
- Start with an alphanumeric character
- End with an alphanumeric character
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | |
+| `packageName` _string_ | packageName is a reference to the name of the package to be installed
and is used to filter the content from catalogs.
packageName is required, immutable, and follows the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters.
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
|
+| `version` _string_ | version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or
more comparison operators, known as comparison strings. Additional
comparison strings can be added using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples:
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
|
+| `channels` _string array_ | channels is an optional reference to a set of channels belonging to
the package specified in the packageName field.
A "channel" is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters. No more than 256 channels can be specified.
When specified, it is used to constrain the set of installable bundles and
the automated upgrade path. This constraint is an AND operation with the
version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- Automatic upgrades will be constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
|
| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is an optional field that can be used
to filter the set of ClusterCatalogs used in the bundle
selection process.
When unspecified, all ClusterCatalogs will be used in
the bundle selection process. | | |
-| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is an optional field that controls whether
the upgrade path(s) defined in the catalog are enforced for the package
referenced in the packageName field.
Allowed values are: ["CatalogProvided", "SelfCertified"].
When this field is set to "CatalogProvided", automatic upgrades will only occur
when upgrade constraints specified by the package author are met.
When this field is set to "SelfCertified", the upgrade constraints specified by
the package author are ignored. This allows for upgrades and downgrades to
any version of the package. This is considered a dangerous operation as it
can lead to unknown and potentially disastrous outcomes, such as data
loss. It is assumed that users have independently verified changes when
using this option.
If unspecified, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
|
+| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is an optional field that controls whether
the upgrade path(s) defined in the catalog are enforced for the package
referenced in the packageName field.
Allowed values are "CatalogProvided" and "SelfCertified"; the field may also be omitted.
When this field is set to "CatalogProvided", automatic upgrades will only occur
when upgrade constraints specified by the package author are met.
When this field is set to "SelfCertified", the upgrade constraints specified by
the package author are ignored. This allows for upgrades and downgrades to
any version of the package. This is considered a dangerous operation as it
can lead to unknown and potentially disastrous outcomes, such as data
loss. It is assumed that users have independently verified changes when
using this option.
When this field is omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
|
#### ClusterExtension
@@ -97,13 +97,13 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `apiVersion` _string_ | `olm.operatorframework.io/v1alpha1` | | |
+| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterExtension` | | |
| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
-| `spec` _[ClusterExtensionSpec](#clusterextensionspec)_ | | | |
-| `status` _[ClusterExtensionStatus](#clusterextensionstatus)_ | | | |
+| `spec` _[ClusterExtensionSpec](#clusterextensionspec)_ | spec is an optional field that defines the desired state of the ClusterExtension. | | |
+| `status` _[ClusterExtensionStatus](#clusterextensionstatus)_ | status is an optional field that defines the observed state of the ClusterExtension. | | |
#### ClusterExtensionInstallConfig
@@ -120,16 +120,14 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `namespace` _string_ | namespace is a reference to the Namespace in which the bundle of
content for the package referenced in the packageName field will be applied.
The bundle may contain cluster-scoped resources or resources that are
applied to other Namespaces. This Namespace is expected to exist.
namespace is required, immutable, and follows the DNS label standard
as defined in [RFC 1123]. This means that valid values:
- Contain no more than 63 characters
- Contain only lowercase alphanumeric characters or '-'
- Start with an alphanumeric character
- End with an alphanumeric character
Some examples of valid values are:
- some-namespace
- 123-namespace
- 1-namespace-2
- somenamespace
Some examples of invalid values are:
- -some-namespace
- some-namespace-
- thisisareallylongnamespacenamethatisgreaterthanthemaximumlength
- some.namespace
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Pattern: `^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`
|
-| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount is a required reference to a ServiceAccount that exists
in the installNamespace. The provided ServiceAccount is used to install and
manage the content for the package specified in the packageName field.
In order to successfully install and manage the content for the package,
the ServiceAccount provided via this field should be configured with the
appropriate permissions to perform the necessary operations on all the
resources that are included in the bundle of content being applied. | | |
-| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is an optional field that can be used to configure the preflight checks run before installation or upgrade of the content for the package specified in the packageName field.
When specified, it overrides the default configuration of the preflight checks that are required to execute successfully during an install/upgrade operation.
When not specified, the default configuration for each preflight check will be used. | | |
+| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is an optional field that can be used to configure the checks that are
run before installation or upgrade of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration will be used. | | |
#### ClusterExtensionInstallStatus
-
+ClusterExtensionInstallStatus is a representation of the status of the identified bundle.
@@ -138,7 +136,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is a representation of the currently installed bundle.
A "bundle" is a versioned set of content that represents the resources that
need to be applied to a cluster to install a package. | | |
+| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is a required field which represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that
need to be applied to a cluster to install a package. | | Required: \{\}
|
#### ClusterExtensionList
@@ -153,12 +151,12 @@ ClusterExtensionList contains a list of ClusterExtension
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `apiVersion` _string_ | `olm.operatorframework.io/v1alpha1` | | |
+| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterExtensionList` | | |
| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
-| `items` _[ClusterExtension](#clusterextension) array_ | | | |
+| `items` _[ClusterExtension](#clusterextension) array_ | items is a required list of ClusterExtension objects. | | Required: \{\}
|
#### ClusterExtensionSpec
@@ -174,15 +172,17 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `source` _[SourceConfig](#sourceconfig)_ | source is a required field which selects the installation source of content
for this ClusterExtension. Selection is performed by setting the sourceType.
Catalog is currently the only implemented sourceType, and setting the
sourcetype to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | |
-| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is a required field used to configure the installation options
for the ClusterExtension such as the installation namespace,
the service account and the pre-flight check configuration.
Below is a minimal example of an installation definition (in yaml):
install:
namespace: example-namespace
serviceAccount:
name: example-sa | | |
+| `namespace` _string_ | namespace is a reference to a Kubernetes namespace.
This is the namespace in which the provided ServiceAccount must exist.
It also designates the default namespace where namespace-scoped resources
for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
namespace is required, immutable, and follows the DNS label standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
start and end with an alphanumeric character, and be no longer than 63 characters
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Required: \{\}
|
+| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount is a reference to a ServiceAccount used to perform all interactions
with the cluster that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
serviceAccount is required. | | Required: \{\}
|
+| `source` _[SourceConfig](#sourceconfig)_ | source is a required field which selects the installation source of content
for this ClusterExtension. Selection is performed by setting the sourceType.
Catalog is currently the only implemented sourceType, and setting the
sourcetype to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
|
+| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is an optional field used to configure the installation options
for the ClusterExtension, such as the pre-flight check configuration. | | |
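
Putting these fields together, a minimal ClusterExtension manifest under this v1 spec might look like the sketch below (the names and package are illustrative, not taken from this change):

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: example-extension
spec:
  namespace: example-namespace    # must already exist; also the default namespace for the extension's namespaced resources
  serviceAccount:
    name: example-installer       # ServiceAccount in spec.namespace used to install and manage the extension's content
  source:
    sourceType: Catalog           # Catalog is currently the only implemented sourceType
    catalog:
      packageName: example-package
```
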
#### ClusterExtensionStatus
-ClusterExtensionStatus defines the observed state of ClusterExtension.
+ClusterExtensionStatus defines the observed state of a ClusterExtension.
@@ -191,8 +191,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | | | |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions is a representation of the current state for this ClusterExtension.
The status is represented by a set of "conditions".
Each condition is generally structured in the following format:
- Type: a string representation of the condition type. More or less the condition "name".
- Status: a string representation of the state of the condition. Can be one of ["True", "False", "Unknown"].
- Reason: a string representation of the reason for the current state of the condition. Typically useful for building automation around particular Type+Reason combinations.
- Message: a human readable message that further elaborates on the state of the condition
The global set of condition types are:
- "Installed", represents whether or not the a bundle has been installed for this ClusterExtension
- "Progressing", represents whether or not the ClusterExtension is progressing towards a new state
When the ClusterExtension is sourced from a catalog, the following conditions are also possible:
- "Deprecated", represents an aggregation of the PackageDeprecated, ChannelDeprecated, and BundleDeprecated condition types
- "PackageDeprecated", represents whether or not the package specified in the spec.source.catalog.packageName field has been deprecated
- "ChannelDeprecated", represents whether or not any channel specified in spec.source.catalog.channels has been deprecated
- "BundleDeprecated", represents whether or not the installed bundle is deprecated
The current set of reasons are:
- "Succeeded", this reason is set on the "Installed" and "Progressing" conditions when initial installation and progressing to a new state is successful
- "Failed", this reason is set on the "Installed" condition when an error has occurred while performing the initial installation.
- "Blocked", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that requires manual intervention for recovery
- "Retrying", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that could be resolved on subsequent reconciliation attempts
- "Deprecated", this reason is set on the "Deprecated", "PackageDeprecated", "ChannelDeprecated", and "BundleDeprecated" conditions to signal that the installed package has been deprecated at the particular scope | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
These are indications from a package owner to guide users away from a particular package, channel, or bundle.
BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
PackageDeprecated is set if the requested package is marked deprecated in the catalog.
Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | |
+| `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | |
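
As an illustration of how these conditions might surface on a successfully installed extension, the status could look roughly like the following (bundle name, version, and messages are invented for the example):

```yaml
status:
  install:
    bundle:
      name: example-operator.v1.0.1
      version: 1.0.1
  conditions:
    - type: Installed
      status: "True"
      reason: Succeeded
      message: Installed bundle successfully
    - type: Progressing
      status: "True"
      reason: Succeeded
      message: desired state reached
```
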
#### PreflightConfig
@@ -208,23 +208,23 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
checks that run prior to upgrades of installed content.
The CRD Upgrade Safety pre-flight check safeguards from unintended
consequences of upgrading a CRD, such as data loss.
This field is required if the spec.install.preflight field is specified. | | |
+| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
checks that run prior to upgrades of installed content.
The CRD Upgrade Safety pre-flight check safeguards from unintended
consequences of upgrading a CRD, such as data loss. | | |
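
For example, wiring this into a ClusterExtension to turn the check off might look like the sketch below (the `policy: Disabled` value mirrors the downgrade tutorial elsewhere in this change; the rest of the spec is elided):

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: example-extension
spec:
  # ... namespace, serviceAccount, and source omitted for brevity ...
  install:
    preflight:
      crdUpgradeSafety:
        policy: Disabled
```
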
#### ServiceAccountReference
-ServiceAccountReference references a serviceAccount.
+ServiceAccountReference identifies the serviceAccount used to install a ClusterExtension.
_Appears in:_
-- [ClusterExtensionInstallConfig](#clusterextensioninstallconfig)
+- [ClusterExtensionSpec](#clusterextensionspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount
to be used for installation and management of the content for the package
specified in the packageName field.
This ServiceAccount is expected to exist in the installNamespace.
This field follows the DNS subdomain name standard as defined in [RFC
1123]. This means that valid values:
- Contain no more than 253 characters
- Contain only lowercase alphanumeric characters, '-', or '.'
- Start with an alphanumeric character
- End with an alphanumeric character
Some examples of valid values are:
- some-serviceaccount
- 123-serviceaccount
- 1-serviceaccount-2
- someserviceaccount
- some.serviceaccount
Some examples of invalid values are:
- -some-serviceaccount
- some-serviceaccount-
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Pattern: `^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
|
+| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount
to be used for installation and management of the content for the package
specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
name follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
- 123-serviceaccount
- 1-serviceaccount-2
- someserviceaccount
- some.serviceaccount
Some examples of invalid values are:
- -some-serviceaccount
- some-serviceaccount-
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
|
#### SourceConfig
@@ -240,8 +240,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `sourceType` _string_ | sourceType is a required reference to the type of install source.
Allowed values are ["Catalog"]
When this field is set to "Catalog", information for determining the appropriate
bundle of content to install will be fetched from ClusterCatalog resources existing
on the cluster. When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
|
-| `catalog` _[CatalogSource](#catalogsource)_ | catalog is used to configure how information is sourced from a catalog. This field must be defined when sourceType is set to "Catalog",
and must be the only field defined for this sourceType. | | |
+| `sourceType` _string_ | sourceType is a required reference to the type of install source.
Allowed values are "Catalog"
When this field is set to "Catalog", information for determining the
appropriate bundle of content to install will be fetched from
ClusterCatalog resources existing on the cluster.
When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
Required: \{\}
|
+| `catalog` _[CatalogSource](#catalogsource)_ | catalog is used to configure how information is sourced from a catalog.
This field is required when sourceType is "Catalog", and forbidden otherwise. | | |
#### UpgradeConstraintPolicy
diff --git a/docs/concepts/controlling-catalog-selection.md b/docs/concepts/controlling-catalog-selection.md
index 68d19c2b3..dc2f90ab8 100644
--- a/docs/concepts/controlling-catalog-selection.md
+++ b/docs/concepts/controlling-catalog-selection.md
@@ -18,7 +18,7 @@ To select a specific catalog by name, you can use the `matchLabels` field in you
#### Example
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: my-extension
@@ -39,7 +39,7 @@ If you have catalogs labeled with specific metadata, you can select them using `
#### Using `matchLabels`
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: my-extension
@@ -56,7 +56,7 @@ This selects catalogs labeled with `example.com/support: "true"`.
#### Using `matchExpressions`
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: my-extension
@@ -81,7 +81,7 @@ You can exclude catalogs by using the `NotIn` or `DoesNotExist` operators in `ma
#### Example: Exclude Specific Catalogs
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: my-extension
@@ -101,7 +101,7 @@ This excludes the catalog named `unwanted-catalog` from consideration.
#### Example: Exclude Catalogs with a Specific Label
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: my-extension
@@ -125,7 +125,7 @@ When multiple catalogs provide the same package, you can set priorities to resol
In your `ClusterCatalog` resource, set the `priority` field:
```yaml
-apiVersion: catalogd.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
name: high-priority-catalog
@@ -159,70 +159,71 @@ If the system cannot resolve to a single bundle due to ambiguity, it will genera
1. **Create or Update `ClusterCatalogs` with Appropriate Labels and Priority**
- ```yaml
- apiVersion: catalogd.operatorframework.io/v1alpha1
- kind: ClusterCatalog
- metadata:
- name: catalog-a
- labels:
- example.com/support: "true"
- spec:
- priority: 1000
- source:
- type: Image
- image:
- ref: quay.io/example/content-management-a:latest
- ```
-
- ```yaml
- apiVersion: catalogd.operatorframework.io/v1alpha1
- kind: ClusterCatalog
- metadata:
- name: catalog-b
- labels:
- example.com/support: "false"
- spec:
- priority: 500
- source:
- type: Image
- image:
- ref: quay.io/example/content-management-b:latest
- ```
- NB: an `olm.operatorframework.io/metadata.name` label will be added automatically to ClusterCatalogs when applied
+ ```yaml
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterCatalog
+ metadata:
+ name: catalog-a
+ labels:
+ example.com/support: "true"
+ spec:
+ priority: 1000
+ source:
+ type: Image
+ image:
+ ref: quay.io/example/content-management-a:latest
+ ```
+
+ ```yaml
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterCatalog
+ metadata:
+ name: catalog-b
+ labels:
+ example.com/support: "false"
+ spec:
+ priority: 500
+ source:
+ type: Image
+ image:
+ ref: quay.io/example/content-management-b:latest
+ ```
+ !!! note
+ An `olm.operatorframework.io/metadata.name` label will be added automatically to ClusterCatalogs when applied
2. **Create a `ClusterExtension` with Catalog Selection**
- ```yaml
- apiVersion: olm.operatorframework.io/v1alpha1
- kind: ClusterExtension
- metadata:
- name: install-my-operator
- spec:
- packageName: my-operator
- catalog:
- selector:
- matchLabels:
- example.com/support: "true"
- ```
+ ```yaml
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name: install-my-operator
+ spec:
+ packageName: my-operator
+ catalog:
+ selector:
+ matchLabels:
+ example.com/support: "true"
+ ```
3. **Apply the Resources**
- ```shell
- kubectl apply -f content-management-a.yaml
- kubectl apply -f content-management-b.yaml
- kubectl apply -f install-my-operator.yaml
- ```
+ ```shell
+ kubectl apply -f content-management-a.yaml
+ kubectl apply -f content-management-b.yaml
+ kubectl apply -f install-my-operator.yaml
+ ```
4. **Verify the Installation**
- Check the status of the `ClusterExtension`:
+ Check the status of the `ClusterExtension`:
- ```shell
- kubectl get clusterextension install-my-operator -o yaml
- ```
+ ```shell
+ kubectl get clusterextension install-my-operator -o yaml
+ ```
- The status should indicate that the bundle was resolved from `catalog-a` due to the higher priority and matching label.
+ The status should indicate that the bundle was resolved from `catalog-a` due to the higher priority and matching label.
## Important Notes
diff --git a/docs/concepts/crd-upgrade-safety.md b/docs/concepts/crd-upgrade-safety.md
index 47ad18d7b..339315472 100644
--- a/docs/concepts/crd-upgrade-safety.md
+++ b/docs/concepts/crd-upgrade-safety.md
@@ -56,7 +56,7 @@ The CRD Upgrade Safety preflight check can be entirely disabled by adding the
`preflight.crdUpgradeSafety.disabled` field with a value of "true" to the ClusterExtension of the CRD.
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: clusterextension-sample
diff --git a/docs/concepts/upgrade-support.md b/docs/concepts/upgrade-support.md
index 9bc6e31ad..8287ff2b3 100644
--- a/docs/concepts/upgrade-support.md
+++ b/docs/concepts/upgrade-support.md
@@ -17,10 +17,12 @@ When determining upgrade edges, also known as upgrade paths or upgrade constrain
By supporting legacy OLM semantics, OLM v1 now honors the upgrade graph from catalogs accurately.
-* If there are multiple possible successors, OLM v1 behavior differs in the following ways:
- * In legacy OLM, the successor closest to the channel head is chosen.
- * In OLM v1, the successor with the highest semantic version (semver) is chosen.
-* Consider the following set of file-based catalog (FBC) channel entries:
+If there are multiple possible successors, OLM v1 behavior differs in the following ways:
+
+* In legacy OLM, the successor closest to the channel head is chosen.
+* In OLM v1, the successor with the highest semantic version (semver) is chosen.
+
+Consider the following set of file-based catalog (FBC) channel entries:
```yaml
# ...
@@ -38,7 +40,7 @@ If `1.0.0` is installed, OLM v1 behavior differs in the following ways:
You can change the default behavior of the upgrade constraints by setting the `upgradeConstraintPolicy` parameter in your cluster extension's custom resource (CR).
``` yaml hl_lines="10"
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name:
@@ -51,7 +53,7 @@ spec:
version: ""
```
-where setting the `upgradeConstraintPolicy` to:
+Setting the `upgradeConstraintPolicy` to:
`SelfCertified`
: Does not limit the next version to the set of successors, and instead allows for any downgrade, sidegrade, or upgrade.
@@ -63,8 +65,8 @@ where setting the `upgradeConstraintPolicy` to:
OLM supports Semver to provide a simplified way for package authors to define compatible upgrades. According to the Semver standard, releases within a major version (e.g. `>=1.0.0 <2.0.0`) must be compatible. As a result, package authors can publish a new package version following the Semver specification, and OLM assumes compatibility. Package authors do not have to explicitly define upgrade edges in the catalog.
-> [!NOTE]
-> Currently, OLM 1.0 does not support automatic upgrades to the next major version. You must manually verify and perform major version upgrades. For more information about major version upgrades, see [Manually verified upgrades and downgrades](#manually-verified-upgrades-and-downgrades).
+!!! note
+ Currently, OLM 1.0 does not support automatic upgrades to the next major version. You must manually verify and perform major version upgrades. For more information about major version upgrades, see [Manually verified upgrades and downgrades](#manually-verified-upgrades-and-downgrades).
### Upgrades within the major version zero
@@ -77,7 +79,8 @@ You must verify and perform upgrades manually in cases where automatic upgrades
## Manually verified upgrades and downgrades
-**Warning:** If you want to force an upgrade manually, you must thoroughly verify the outcome before applying any changes to production workloads. Failure to test and verify the upgrade might lead to catastrophic consequences such as data loss.
+!!! warning
+ If you want to force an upgrade manually, you must thoroughly verify the outcome before applying any changes to production workloads. Failure to test and verify the upgrade might lead to catastrophic consequences such as data loss.
As a package admin, if you must upgrade or downgrade to version that might be incompatible with the currently installed version, you can set the `.spec.upgradeConstraintPolicy` field to `SelfCertified` on the relevant `ClusterExtension` resource.
@@ -86,7 +89,7 @@ If you set the field to `SelfCertified`, no upgrade constraints are set on the p
Example `ClusterExtension` with `.spec.upgradeConstraintPolicy` field set to `SelfCertified`:
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: extension-sample
diff --git a/docs/contribute/developer.md b/docs/contribute/developer.md
index b97c9d693..8a63f0d7c 100644
--- a/docs/contribute/developer.md
+++ b/docs/contribute/developer.md
@@ -3,10 +3,10 @@
The following `make run` starts a [KIND](https://sigs.k8s.io/kind) cluster, giving you a local cluster for testing; see the manual install steps below for how to run against a remote cluster.
-> [!NOTE]
-> You will need a container runtime environment, like Docker, or experimentally, Podman, installed, to run Kind.
->
-> If you are on MacOS, see [Special Setup for MacOS](#special-setup-for-macos).
+!!! note
+ You will need a container runtime environment, like Docker, or experimentally, Podman, installed, to run Kind.
+
+ If you are on MacOS, see [Special Setup for MacOS](#special-setup-for-macos).
### Quickstart Installation
@@ -20,9 +20,9 @@ This will build a local container image of the operator-controller, create a new
### To Install Any Given Release
-> [!CAUTION]
-> Operator-Controller depends on [cert-manager](https://cert-manager.io/). Running the following command
-> may affect an existing installation of cert-manager and cause cluster instability.
+!!! warning
+ Operator-Controller depends on [cert-manager](https://cert-manager.io/). Running the following command
+ may affect an existing installation of cert-manager and cause cluster instability.
The latest version of Operator Controller can be installed with the following command:
@@ -33,21 +33,21 @@ curl -L -s https://github.com/operator-framework/operator-controller/releases/la
### Manual Step-by-Step Installation
1. Install Instances of Custom Resources:
-```sh
-kubectl apply -f config/samples/
-```
+ ```sh
+ kubectl apply -f config/samples/
+ ```
2. Build and push your image to the location specified by `IMG`:
-```sh
-make docker-build docker-push IMG=/operator-controller:tag
-```
+ ```sh
+ make docker-build docker-push IMG=/operator-controller:tag
+ ```
3. Deploy the controller to the cluster with the image specified by `IMG`:
-```sh
-make deploy IMG=/operator-controller:tag
-```
+ ```sh
+ make deploy IMG=/operator-controller:tag
+ ```
### Uninstall CRDs
To delete the CRDs from the cluster:
@@ -72,7 +72,8 @@ make manifests
---
-**NOTE:** Run `make help` for more information on all potential `make` targets.
+!!! note
+ Run `make help` for more information on all potential `make` targets.
### Rapid Iterative Development with Tilt
@@ -124,17 +125,18 @@ This is typically as short as:
tilt up
```
-**NOTE:** if you are using Podman, at least as of v4.5.1, you need to do this:
+!!! note
+ If you are using Podman, at least as of v4.5.1, you need to do this:
-```shell
-DOCKER_BUILDKIT=0 tilt up
-```
+ ```shell
+ DOCKER_BUILDKIT=0 tilt up
+ ```
-Otherwise, you'll see an error when Tilt tries to build your image that looks similar to:
+ Otherwise, you'll see an error when Tilt tries to build your image that looks similar to:
-```text
-Build Failed: ImageBuild: stat /var/tmp/libpod_builder2384046170/build/Dockerfile: no such file or directory
-```
+ ```text
+ Build Failed: ImageBuild: stat /var/tmp/libpod_builder2384046170/build/Dockerfile: no such file or directory
+ ```
When Tilt starts, you'll see something like this in your terminal:
diff --git a/docs/getting-started/olmv1_getting_started.md b/docs/getting-started/olmv1_getting_started.md
index 1156bc968..0763f9263 100644
--- a/docs/getting-started/olmv1_getting_started.md
+++ b/docs/getting-started/olmv1_getting_started.md
@@ -2,9 +2,9 @@
The following script will install OLMv1 on a Kubernetes cluster. If you don't have one, you can deploy a Kubernetes cluster with [KIND](https://sigs.k8s.io/kind).
-> [!CAUTION]
-> Operator-Controller depends on [cert-manager](https://cert-manager.io/). Running the following command
-> may affect an existing installation of cert-manager and cause cluster instability.
+!!! warning
+ Operator-Controller depends on [cert-manager](https://cert-manager.io/). Running the following command
+ may affect an existing installation of cert-manager and cause cluster instability.
The latest version of Operator Controller can be installed with the following command:
@@ -35,7 +35,7 @@ To create the catalog, run the following command:
```bash
# Create ClusterCatalog
kubectl apply -f - <, ...]
```
-Note: The `resourceNames` field should be populated with the names of the `ClusterRole`s and `ClusterRoleBinding`s created by OLM v1.
-These names are generated with the following format: `.`. Since it is not a trivial task
-to generate these names ahead of time, it is recommended to use a wildcard `*` in the `resourceNames` field for the installation.
-Then, update the `resourceNames` fields by inspecting the cluster for the generated resource names. For instance, for `ClusterRole`s:
+!!! note
+ The `resourceNames` field should be populated with the names of the `ClusterRole`s and `ClusterRoleBinding`s created by OLM v1.
+ These names are generated with the following format: `.`. Since it is not a trivial task
+ to generate these names ahead of time, it is recommended to use a wildcard `*` in the `resourceNames` field for the installation.
+ Then, update the `resourceNames` fields by inspecting the cluster for the generated resource names. For instance, for `ClusterRole`s:
```terminal
kubectl get clusterroles | grep argocd
@@ -97,9 +98,9 @@ argocd-operator.v0-22gmilmgp91wu25is5i2ec598hni8owq3l71bbkl7iz3 2024-09-3
The same can be done for `ClusterRoleBindings`.
-##### Step 2. `CustomResourceDefinition` permissions
+#### Step 2. `CustomResourceDefinition` permissions
-The installer service account must be able to create and manage the `CustomResourceDefinition`s for the extension, as well
+The installer service account must be able to create and manage the `CustomResourceDefinition`s for the extension, as well
as grant the extension controller's service account the permissions it needs to manage its CRDs.
```yaml
@@ -113,7 +114,7 @@ as grant the extension controller's service account the permissions it needs to
resourceNames: [applications.argoproj.io, appprojects.argoproj.io, argocds.argoproj.io, argocdexports.argoproj.io, applicationsets.argoproj.io]
```
-##### Step 3. `OwnerReferencesPermissionEnforcement` permissions
+#### Step 3. `OwnerReferencesPermissionEnforcement` permissions
For clusters that use `OwnerReferencesPermissionEnforcement`, the installer service account must be able to update finalizers on the ClusterExtension to be able to set blockOwnerDeletion and ownerReferences for clusters that use `OwnerReferencesPermissionEnforcement`.
This is only a requirement for clusters that use the [OwnerReferencesPermissionEnforcement](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement) admission plug-in.
@@ -126,7 +127,7 @@ This is only a requirement for clusters that use the [OwnerReferencesPermissionE
resourceNames: [argocd-operator.v0.6.0]
```
-##### Step 4. Bundled cluster-scoped resource permissions
+#### Step 4. Bundled cluster-scoped resource permissions
Permissions must be added for the creation and management of any cluster-scoped resources included in the bundle.
In this example, the ArgoCD bundle contains a `ClusterRole` called `argocd-operator-metrics-reader`. Given that
@@ -140,12 +141,13 @@ is sufficient to add the `argocd-operator-metrics-reader`resource name to the `r
resourceNames: [, ..., argocd-operator-metrics-reader]
```
-##### Step 5. Operator permissions declared in the ClusterServiceVersion
+#### Step 5. Operator permissions declared in the ClusterServiceVersion
Include all permissions defined in the `.spec.install.permissions` ([reference](https://github.com/argoproj-labs/argocd-operator/blob/da6b8a7e68f71920de9545152714b9066990fc4b/deploy/olm-catalog/argocd-operator/0.6.0/argocd-operator.v0.6.0.clusterserviceversion.yaml#L1091)) and `.spec.install.clusterPermissions` ([reference](https://github.com/argoproj-labs/argocd-operator/blob/da6b8a7e68f71920de9545152714b9066990fc4b/deploy/olm-catalog/argocd-operator/0.6.0/argocd-operator.v0.6.0.clusterserviceversion.yaml#L872)) stanzas in the bundle's `ClusterServiceVersion`.
These permissions are required by the extension controller, and therefore the installer service account must be able to grant them.
-Note: there may be overlap between the rules defined in each stanza. Overlapping rules needn't be added twice.
+!!! note
+ There may be overlap between the rules defined in each stanza. Overlapping rules needn't be added twice.
```yaml
# from .spec.install.clusterPermissions
@@ -224,12 +226,12 @@ Note: there may be overlap between the rules defined in each stanza. Overlapping
# verbs: ["create", "patch"]
```
-#### Derive permissions for the installer service account `Role`
+### Derive permissions for the installer service account `Role`
The following steps detail how to define the namespace-scoped permissions needed by the installer service account's `Role`.
The installer service account must create and manage the `RoleBinding`s for the extension controller(s).
-##### Step 1. `Deployment` permissions
+#### Step 1. `Deployment` permissions
The installer service account must be able to create and manage the `Deployment`s for the extension controller(s).
The `Deployment` name(s) can be found in the `ClusterServiceVersion` resource packed in the bundle under `.spec.install.deployments` ([reference](https://github.com/argoproj-labs/argocd-operator/blob/da6b8a7e68f71920de9545152714b9066990fc4b/deploy/olm-catalog/argocd-operator/0.6.0/argocd-operator.v0.6.0.clusterserviceversion.yaml#L1029)).
@@ -246,7 +248,7 @@ This example's `ClusterServiceVersion` can be found [here](https://github.com/ar
resourceNames: [argocd-operator-controller-manager]
```
-##### Step 2: `ServiceAccount` permissions
+#### Step 2: `ServiceAccount` permissions
The installer service account must be able to create and manage the `ServiceAccount`(s) for the extension controller(s).
The `ServiceAccount` name(s) can be found in deployment template in the `ClusterServiceVersion` resource packed in the bundle under `.spec.install.deployments`.
@@ -263,7 +265,7 @@ This example's `ClusterServiceVersion` can be found [here](https://github.com/ar
resourceNames: [argocd-operator-controller-manager]
```
-##### Step 3. Bundled namespace-scoped resource permissions
+#### Step 3. Bundled namespace-scoped resource permissions
The installer service account must also create and manage other namespace-scoped resources included in the bundle.
In this example, the bundle also includes two additional namespace-scoped resources:
@@ -291,9 +293,10 @@ Therefore, the following permissions must be given to the installer service acco
resourceNames: [argocd-operator-manager-config]
```
-#### Putting it all together
+### Putting it all together
Once the installer service account required cluster-scoped and namespace-scoped permissions have been collected:
+
1. Create the installation namespace
2. Create the installer `ServiceAccount`
3. Create the installer `ClusterRole`
@@ -302,15 +305,15 @@ Once the installer service account required cluster-scoped and namespace-scoped
6. Create the `RoleBinding` between the installer service account and its role
7. Create the `ClusterExtension`
-A manifest with the full set of resources can be found [here](https://github.com/operator-framework/operator-controller/blob/main/config/samples/olm_v1alpha1_clusterextension.yaml).
+A manifest with the full set of resources can be found [here](https://github.com/operator-framework/operator-controller/blob/main/config/samples/olm_v1_clusterextension.yaml).
-### Alternatives
+## Alternatives
We understand that manually determining the minimum RBAC required for installation/upgrade of a `ClusterExtension` is quite complex and time-consuming.
In the near future, OLM v1 will provide tools and automation in order to simplify this process while maintaining our security posture.
For users wishing to test out OLM v1 in non-production settings, we offer the following alternatives:
-#### Give the installer service account admin privileges
+### Give the installer service account admin privileges
The `cluster-admin` `ClusterRole` can be bound to the installer service account giving it full permissions to the cluster.
While this obviates the need to determine the minimal RBAC required for installation, it is also dangerous. It is highly recommended
@@ -344,9 +347,9 @@ kubectl create clusterrolebinding my-cluster-extension-installer-role-binding \
--serviceaccount=my-cluster-extension-namespace:my-cluster-installer-service-account
```
-#### hack/tools/catalog
+### hack/tools/catalog
In the spirit of making this process more tenable until the proper tools are in place, the scripts
in [hack/tools/catalogs](https://github.com/operator-framework/operator-controller/blob/main/hack/tools/catalogs) were created to help the user navigate and search catalogs as well
-as to generate the minimal RBAC requirements. These tools are offered as is, with no guarantees on their correctness,
+as to generate the minimal RBAC requirements. These tools are offered as is, with no guarantees on their correctness,
support, or maintenance. For more information, see [Hack Catalog Tools](https://github.com/operator-framework/operator-controller/blob/main/hack/tools/catalogs/README.md).
diff --git a/docs/howto/how-to-channel-based-upgrades.md b/docs/howto/how-to-channel-based-upgrades.md
index 501a7f951..e7638d1a1 100644
--- a/docs/howto/how-to-channel-based-upgrades.md
+++ b/docs/howto/how-to-channel-based-upgrades.md
@@ -5,7 +5,7 @@ A "channel" is a package author defined stream of updates for an extension. A se
Example:
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
@@ -27,7 +27,7 @@ Note that the `version` field also accepts a version range to further restrict t
Example:
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
diff --git a/docs/howto/how-to-grant-api-access.md b/docs/howto/how-to-grant-api-access.md
new file mode 100644
index 000000000..e73464c43
--- /dev/null
+++ b/docs/howto/how-to-grant-api-access.md
@@ -0,0 +1,174 @@
+
+# Granting Users Access to API Resources in OLM
+
+When cluster extensions are managed via OLM, they often provide Custom Resource Definitions (CRDs) that expose new API resources. Typically, cluster administrators have full management access to these resources by default, whereas non-administrative users might lack sufficient permissions. Cluster administrators must therefore grant these users the permissions they need to create, view, or edit the Custom Resources.
+
+OLM v1 does **not** automatically configure or manage role-based access control (RBAC) for users to interact with the APIs provided by installed packages. Cluster administrators must manage RBAC to grant appropriate permissions to non-administrative users. This guide outlines the steps to manually configure RBAC, with a focus on creating ClusterRoles and binding them to specific users or groups.
+
+---
+
+## 1. Finding API Groups and Resources Provided by a ClusterExtension
+
+To create appropriate RBAC policies, you need to know which API groups and resources are exposed by the installed cluster extension. You can inspect the installed CRDs and resources by running:
+
+```bash
+kubectl get crds
+```
+
+This will list all available CRDs, and you can inspect individual CRDs for their API groups:
+
+```bash
+kubectl get crd -o yaml
+```
+
+A user can use label selectors to find CRDs owned by a specific cluster extension:
+
+```bash
+kubectl get crds -l 'olm.operatorframework.io/owner-kind=ClusterExtension,olm.operatorframework.io/owner-name='
+```
+
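+To see at a glance which API groups and resources those CRDs serve, one option is a generic `kubectl` query such as the following (not specific to OLM; shown only as a convenience):
+
+```bash
+# Print "<group> <resource>" for every CRD in the cluster
+kubectl get crds -o jsonpath='{range .items[*]}{.spec.group}{" "}{.spec.names.plural}{"\n"}{end}'
+```
+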
+---
+
+## 2. Creating Default ClusterRoles for API/CRD Access
+
+Administrators can define standard roles to control access to the API resources provided by installed cluster extensions. If the cluster extension does not provide default roles, you can create them yourself.
+
+### Default Roles
+
+- **View ClusterRole**: Grants read-only access to all custom resource objects of specified API resources across the cluster. This role is intended for users who need visibility into the resources without any permissions to modify them, and is well suited to monitoring and other read-only use cases.
+- **Edit ClusterRole**: Allows users to modify all custom resource objects within the cluster. This role enables users to create, update, and delete resources, making it suitable for team members who need to manage resources but should not control RBAC or manage permissions for others.
+- **Admin ClusterRole**: Provides full permissions (create, update, delete) over all custom resource objects for the specified API resources across the cluster.
+
+### Example: Defining a Custom "View" ClusterRole
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: custom-resource-view
+rules:
+- apiGroups:
+ -
+ resources:
+ -
+ verbs:
+ - get
+ - list
+ - watch
+```
+
+### Example: Defining a Custom "Edit" ClusterRole
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: custom-resource-edit
+rules:
+- apiGroups:
+ -
+ resources:
+ -
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+```
+
+### Example: Defining a Custom "Admin" ClusterRole
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: custom-resource-admin
+rules:
+- apiGroups:
+ -
+ resources:
+ -
+ verbs:
+ - '*'
+```
+**Note**: The `'*'` in verbs allows all actions on the specified resources.
+
+In each case, replace `` and `` with the actual API group and resource names provided by the installed cluster extension.
+
+---
+
+## 3. Granting User Access to API Resources
+
+Once the roles are created, you can bind them to specific users or groups to grant them the necessary permissions. There are two main ways to do this:
+
+### Option 1: Binding Default ClusterRoles to Users
+
+- **ClusterRoleBinding**: Use this to grant access across all namespaces.
+- **RoleBinding**: Use this to grant access within a specific namespace.
+
+#### Example: ClusterRoleBinding
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: custom-resource-view-binding
+subjects:
+- kind: User
+ name: # Or use Group for group-based binding
+roleRef:
+ kind: ClusterRole
+ name: custom-resource-view
+ apiGroup: rbac.authorization.k8s.io
+```
+
+This binding grants `` read-only access to the custom resource across all namespaces.
+
+#### Example: RoleBinding
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: custom-resource-edit-binding
+ namespace:
+subjects:
+- kind: User
+ name:
+roleRef:
+ kind: Role
+ name: custom-resource-edit
+ apiGroup: rbac.authorization.k8s.io
+```
+
+This RoleBinding restricts permissions to a specific namespace.
+
+### Option 2: Extending Default Kubernetes Roles
+
+To automatically extend existing Kubernetes roles (e.g., the default `view`, `edit`, and `admin` roles), you can add **aggregation labels** to **ClusterRoles**. This allows users who already have `view`, `edit`, or `admin` roles to interact with the custom resource without needing additional RoleBindings.
+
+#### Example: Adding Aggregation Labels to a ClusterRole
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: custom-resource-aggregated-view
+ labels:
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+rules:
+ - apiGroups:
+ -
+ resources:
+ -
+ verbs:
+ - get
+ - list
+ - watch
+```
+
+You can create similar ClusterRoles for `edit` and `admin` with appropriate verbs (such as `create`, `update`, `delete` for `edit` and `admin`). By using aggregation labels, the permissions for the custom resources are added to the default roles.
+
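+For instance, a sketch of an aggregated `edit` ClusterRole (using the standard `rbac.authorization.k8s.io/aggregate-to-edit` label; `<api-group>` and `<resource-plural>` are placeholders to fill in) could look like:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: custom-resource-aggregated-edit
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+rules:
+  - apiGroups:
+      - <api-group>
+    resources:
+      - <resource-plural>
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+```
+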
+> **Source**: [Kubernetes RBAC Aggregation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings)
diff --git a/docs/howto/how-to-pin-version.md b/docs/howto/how-to-pin-version.md
index 606b994aa..5dc0660b0 100644
--- a/docs/howto/how-to-pin-version.md
+++ b/docs/howto/how-to-pin-version.md
@@ -5,7 +5,7 @@ To disable automatic updates, and pin the version of an extension, set `version`
Example:
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
diff --git a/docs/howto/how-to-version-range-upgrades.md b/docs/howto/how-to-version-range-upgrades.md
index ddb753fba..1a502a2e2 100644
--- a/docs/howto/how-to-version-range-upgrades.md
+++ b/docs/howto/how-to-version-range-upgrades.md
@@ -5,7 +5,7 @@ Set the version for the desired package in the Catalog source to a comparison st
Example:
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
@@ -21,4 +21,4 @@ spec:
name: argocd-installer
```
-For more information on SemVer version ranges see [version-rages](../concepts/version-ranges.md)
\ No newline at end of file
+For more information on SemVer version ranges see [version-ranges](../concepts/version-ranges.md)
diff --git a/docs/howto/how-to-z-stream-upgrades.md b/docs/howto/how-to-z-stream-upgrades.md
index 8666e09b7..1a638fd1b 100644
--- a/docs/howto/how-to-z-stream-upgrades.md
+++ b/docs/howto/how-to-z-stream-upgrades.md
@@ -5,7 +5,7 @@ To restrict automatic updates to only z-stream patches and avoid breaking change
Example:
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
diff --git a/docs/tutorials/add-catalog.md b/docs/tutorials/add-catalog.md
index 2b75c666d..ff8ffa558 100644
--- a/docs/tutorials/add-catalog.md
+++ b/docs/tutorials/add-catalog.md
@@ -26,7 +26,7 @@ This catalog is distributed as an image [quay.io/operatorhubio/catalog](https://
1. Create a catalog custom resource (CR):
``` yaml title="clustercatalog_cr.yaml"
- apiVersion: olm.operatorframework.io/v1alpha1
+ apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
name: operatorhubio
@@ -48,7 +48,7 @@ This catalog is distributed as an image [quay.io/operatorhubio/catalog](https://
To disable polling, set a zero value, such as `0s`.
``` yaml title="Example `operatorhubio.yaml` CR"
- apiVersion: olm.operatorframework.io/v1alpha1
+ apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
name: operatorhub
@@ -96,7 +96,7 @@ This catalog is distributed as an image [quay.io/operatorhubio/catalog](https://
Namespace:
Labels: olm.operatorframework.io/metadata.name=operatorhubio
Annotations:
- API Version: olm.operatorframework.io/v1alpha1
+ API Version: olm.operatorframework.io/v1
Kind: ClusterCatalog
Metadata:
Creation Timestamp: 2024-10-02T19:51:24Z
diff --git a/docs/tutorials/downgrade-extension.md b/docs/tutorials/downgrade-extension.md
index 0e57d4687..ee25a5136 100644
--- a/docs/tutorials/downgrade-extension.md
+++ b/docs/tutorials/downgrade-extension.md
@@ -31,7 +31,7 @@ Add the `crdUpgradeSafety` field and set its `policy` to `Disabled` in the `Clus
**Example:**
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: example-extension
@@ -51,14 +51,14 @@ spec:
upgradeConstraintPolicy: SelfCertified
```
-** Disable CRD Upgrade Safety Check:**
+**Command Example:**
-**Patch the ClusterExtension Resource:**
+If you prefer using the command line, you can use `kubectl` to modify the CRD upgrade safety check configuration.
- ```bash
- kubectl patch clusterextension --patch '{"spec":{"install":{"preflight":{"crdUpgradeSafety":{"policy":"Disabled"}}}}}' --type=merge
- ```
- Kubernetes will apply the updated configuration, disabling CRD safety checks during the downgrade process.
+```bash
+kubectl patch clusterextension --patch '{"spec":{"install":{"preflight":{"crdUpgradeSafety":{"policy":"Disabled"}}}}}' --type=merge
+```
+Kubernetes will apply the updated configuration, disabling CRD safety checks during the downgrade process.
### 2. Ignoring Catalog Provided Upgrade Constraints
@@ -71,7 +71,7 @@ Set the `upgradeConstraintPolicy` to `SelfCertified` in the `ClusterExtension` r
**Example:**
```yaml
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: example-extension
@@ -102,38 +102,39 @@ Once the CRD safety checks are disabled and upgrade constraints are set, you can
1. **Edit the ClusterExtension Resource:**
- Modify the `ClusterExtension` custom resource to specify the target version and adjust the upgrade constraints.
+ Modify the `ClusterExtension` custom resource to specify the target version and adjust the upgrade constraints.
- ```bash
- kubectl edit clusterextension
- ```
+ ```bash
+ kubectl edit clusterextension
+ ```
2. **Update the Version:**
- Within the YAML editor, update the `spec` section as follows:
-
- ```yaml
- apiVersion: olm.operatorframework.io/v1alpha1
- kind: ClusterExtension
- metadata:
- name:
- spec:
- source:
- sourceType: Catalog
- catalog:
- packageName:
- version:
- install:
- namespace:
- serviceAccount:
- name:
- ```
-
- - **`version`:** Specify the target version you wish to downgrade to.
+ Within the YAML editor, update the `spec` section as follows:
+
+ ```yaml
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterExtension
+ metadata:
+ name:
+ spec:
+ source:
+ sourceType: Catalog
+ catalog:
+ packageName:
+ version:
+ install:
+ namespace:
+ serviceAccount:
+ name:
+ ```
+
+ `target_version`
+ : Specify the target version you wish to downgrade to.
3. **Apply the Changes:**
- Save and exit the editor. Kubernetes will apply the changes and initiate the downgrade process.
+ Save and exit the editor. Kubernetes will apply the changes and initiate the downgrade process.
### 4. Post-Downgrade Verification
@@ -143,31 +144,31 @@ After completing the downgrade, verify that the `ClusterExtension` is functionin
1. **Check the Status of the ClusterExtension:**
- ```bash
- kubectl get clusterextension -o yaml
- ```
+ ```bash
+ kubectl get clusterextension -o yaml
+ ```
- Ensure that the `status` reflects the target version and that there are no error messages.
+ Ensure that the `status` reflects the target version and that there are no error messages.
2. **Validate CRD Integrity:**
- Confirm that all CRDs associated with the `ClusterExtension` are correctly installed and compatible with the downgraded version.
+ Confirm that all CRDs associated with the `ClusterExtension` are correctly installed and compatible with the downgraded version.
- ```bash
- kubectl get crd | grep
- ```
+ ```bash
+ kubectl get crd | grep
+ ```
3. **Test Extension Functionality:**
- Perform functional tests to ensure that the extension operates correctly in its downgraded state.
+ Perform functional tests to ensure that the extension operates correctly in its downgraded state.
4. **Monitor Logs:**
- Check the logs of the operator managing the `ClusterExtension` for any warnings or errors.
+ Check the logs of the operator managing the `ClusterExtension` for any warnings or errors.
- ```bash
- kubectl logs deployment/ -n
- ```
+ ```bash
+ kubectl logs deployment/ -n
+ ```
## Troubleshooting
diff --git a/docs/tutorials/explore-available-content.md b/docs/tutorials/explore-available-content.md
index 76bae2b6f..ada0855ef 100644
--- a/docs/tutorials/explore-available-content.md
+++ b/docs/tutorials/explore-available-content.md
@@ -13,8 +13,9 @@ Then you can query the catalog by using `curl` commands and the `jq` CLI tool to
* You have added a ClusterCatalog of extensions, such as [OperatorHub.io](https://operatorhub.io), to your cluster.
* You have installed the `jq` CLI tool.
-**Note:** By default, Catalogd is installed with TLS enabled for the catalog webserver.
-The following examples will show this default behavior, but for simplicity's sake will ignore TLS verification in the curl commands using the `-k` flag.
+!!! note
+ By default, Catalogd is installed with TLS enabled for the catalog webserver.
+ The following examples will show this default behavior, but for simplicity's sake will ignore TLS verification in the curl commands using the `-k` flag.
## Procedure
@@ -93,38 +94,38 @@ The following examples will show this default behavior, but for simplicity's sak
!!! important
Currently, OLM 1.0 does not support the installation of extensions that use webhooks or that target a single or specified set of namespaces.
- * Return list of packages that support `AllNamespaces` install mode and do not use webhooks:
+3. Return list of packages that support `AllNamespaces` install mode and do not use webhooks:
- ``` terminal
- curl -k https://localhost:8443/catalogs/operatorhubio/api/v1/all | jq -c 'select(.schema == "olm.bundle") | {"package":.package, "version":.properties[] | select(.type == "olm.bundle.object").value.data | @base64d | fromjson | select(.kind == "ClusterServiceVersion" and (.spec.installModes[] | select(.type == "AllNamespaces" and .supported == true) != null) and .spec.webhookdefinitions == null).spec.version}'
+ ``` terminal
+ curl -k https://localhost:8443/catalogs/operatorhubio/api/v1/all | jq -c 'select(.schema == "olm.bundle") | {"package":.package, "version":.properties[] | select(.type == "olm.bundle.object").value.data | @base64d | fromjson | select(.kind == "ClusterServiceVersion" and (.spec.installModes[] | select(.type == "AllNamespaces" and .supported == true) != null) and .spec.webhookdefinitions == null).spec.version}'
+ ```
+
+ ??? success
+ ``` text title="Example output"
+ {"package":"ack-acm-controller","version":"0.0.12"}
+ {"package":"ack-acmpca-controller","version":"0.0.5"}
+ {"package":"ack-apigatewayv2-controller","version":"1.0.7"}
+ {"package":"ack-applicationautoscaling-controller","version":"1.0.11"}
+ {"package":"ack-cloudfront-controller","version":"0.0.9"}
+ {"package":"ack-cloudtrail-controller","version":"1.0.8"}
+ {"package":"ack-cloudwatch-controller","version":"0.0.3"}
+ {"package":"ack-cloudwatchlogs-controller","version":"0.0.4"}
+ {"package":"ack-dynamodb-controller","version":"1.2.9"}
+ {"package":"ack-ec2-controller","version":"1.2.4"}
+ {"package":"ack-ecr-controller","version":"1.0.12"}
+ {"package":"ack-ecs-controller","version":"0.0.4"}
+ {"package":"ack-efs-controller","version":"0.0.5"}
+ {"package":"ack-eks-controller","version":"1.3.3"}
+ {"package":"ack-elasticache-controller","version":"0.0.29"}
+ {"package":"ack-emrcontainers-controller","version":"1.0.8"}
+ {"package":"ack-eventbridge-controller","version":"1.0.6"}
+ {"package":"ack-iam-controller","version":"1.3.6"}
+ {"package":"ack-kafka-controller","version":"0.0.4"}
+ {"package":"ack-keyspaces-controller","version":"0.0.11"}
+ ...
```
- ??? success
- ``` text title="Example output"
- {"package":"ack-acm-controller","version":"0.0.12"}
- {"package":"ack-acmpca-controller","version":"0.0.5"}
- {"package":"ack-apigatewayv2-controller","version":"1.0.7"}
- {"package":"ack-applicationautoscaling-controller","version":"1.0.11"}
- {"package":"ack-cloudfront-controller","version":"0.0.9"}
- {"package":"ack-cloudtrail-controller","version":"1.0.8"}
- {"package":"ack-cloudwatch-controller","version":"0.0.3"}
- {"package":"ack-cloudwatchlogs-controller","version":"0.0.4"}
- {"package":"ack-dynamodb-controller","version":"1.2.9"}
- {"package":"ack-ec2-controller","version":"1.2.4"}
- {"package":"ack-ecr-controller","version":"1.0.12"}
- {"package":"ack-ecs-controller","version":"0.0.4"}
- {"package":"ack-efs-controller","version":"0.0.5"}
- {"package":"ack-eks-controller","version":"1.3.3"}
- {"package":"ack-elasticache-controller","version":"0.0.29"}
- {"package":"ack-emrcontainers-controller","version":"1.0.8"}
- {"package":"ack-eventbridge-controller","version":"1.0.6"}
- {"package":"ack-iam-controller","version":"1.3.6"}
- {"package":"ack-kafka-controller","version":"0.0.4"}
- {"package":"ack-keyspaces-controller","version":"0.0.11"}
- ...
- ```
-
-3. Inspect the contents of an extension's metadata:
+4. Inspect the contents of an extension's metadata:
``` terminal
curl -k https://localhost:8443/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select( .schema == "olm.package") | select( .name == "")'
diff --git a/docs/tutorials/install-extension.md b/docs/tutorials/install-extension.md
index 95bdb5c3a..92b7aadeb 100644
--- a/docs/tutorials/install-extension.md
+++ b/docs/tutorials/install-extension.md
@@ -34,7 +34,7 @@ For information on determining the ServiceAccount's permission, please see [Deri
1. Create a CR for the Kubernetes extension you want to install:
``` yaml title="Example CR"
- apiVersion: olm.operatorframework.io/v1alpha1
+ apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name:
@@ -107,7 +107,7 @@ For information on determining the ServiceAccount's permission, please see [Deri
Namespace:
Labels:
Annotations:
- API Version: olm.operatorframework.io/v1alpha1
+ API Version: olm.operatorframework.io/v1
Kind: ClusterExtension
Metadata:
Creation Timestamp: 2024-10-03T16:02:40Z
diff --git a/docs/tutorials/upgrade-extension.md b/docs/tutorials/upgrade-extension.md
index ea0a20344..86ecaeb75 100644
--- a/docs/tutorials/upgrade-extension.md
+++ b/docs/tutorials/upgrade-extension.md
@@ -23,7 +23,7 @@ For more detailed information see [Upgrade Support](../concepts/upgrade-support.
Suppose we have successfully created and installed v0.5.0 of the ArgoCD operator with the following `ClusterExtension`:
``` yaml title="Example CR"
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: argocd
@@ -43,7 +43,7 @@ spec:
``` terminal
kubectl apply -f - < github.com/openshift/operator-framework-catalogd v0.0.0-20241029145152-5850bc1dfd54
diff --git a/go.sum b/go.sum
index 81cf3df9e..266c4f34e 100644
--- a/go.sum
+++ b/go.sum
@@ -533,10 +533,10 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11 h1:eTNDkNRNV5lZvUbVM9Nop0lBcljSnA8rZX6yQPZ0ZnU=
github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11/go.mod h1:EmVJt97N+pfWFsli/ipXTBZqSG5F5KGQhm3c3IsGq1o=
-github.com/openshift/operator-framework-catalogd v0.0.0-20241029145152-5850bc1dfd54 h1:8jGe/HY7AIuvgvsR2gZggZrPikt5E/v2QiEzP1EqwF0=
-github.com/openshift/operator-framework-catalogd v0.0.0-20241029145152-5850bc1dfd54/go.mod h1:anZurjcFMBvbkuyqlJ98v9z+yjniPKqmhlyitk9DuBQ=
github.com/operator-framework/api v0.27.0 h1:OrVaGKZJvbZo58HTv2guz7aURkhVKYhFqZ/6VpifiXI=
github.com/operator-framework/api v0.27.0/go.mod h1:lg2Xx+S8NQWGYlEOvFwQvH46E5EK5IrAIL7HWfAhciM=
+github.com/operator-framework/catalogd v1.0.0-rc1 h1:wuM8yLy52lwrfyVJJ++l8zV+pxJOnuQLC84fO0UTbts=
+github.com/operator-framework/catalogd v1.0.0-rc1/go.mod h1:ERq4C2ksfkf3wu3XmtGP2fIkBSqS6LfaHhtcSEcU7Ww=
github.com/operator-framework/helm-operator-plugins v0.7.0 h1:YmtIWFc9BaNaDc5mk/dkG0P2BqPZOqpDvjWih5Fczuk=
github.com/operator-framework/helm-operator-plugins v0.7.0/go.mod h1:fUUCJR3bWtMBZ1qdDhbwjacsBHi9uT576tF4u/DwOgQ=
github.com/operator-framework/operator-lib v0.15.0 h1:0QeRM4PMtThqINpcFGCEBnIV3Z8u7/8fYLEx6mUtdcM=
@@ -991,18 +991,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
-k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40=
-k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ=
+k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
+k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c=
-k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM=
+k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
+k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ=
k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q=
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
-k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8=
-k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w=
+k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
+k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
diff --git a/hack/test/build-push-e2e-catalog.sh b/hack/test/build-push-e2e-catalog.sh
deleted file mode 100755
index 9fd1a9d6b..000000000
--- a/hack/test/build-push-e2e-catalog.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#! /bin/bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-help="
-build-push-e2e-catalog.sh is a script to build and push the e2e catalog image using kaniko.
-Usage:
- build-push-e2e-catalog.sh [NAMESPACE] [TAG]
-
-Argument Descriptions:
- - NAMESPACE is the namespace the kaniko Job should be created in
- - TAG is the full tag used to build and push the catalog image
-"
-
-if [[ "$#" -ne 2 ]]; then
- echo "Illegal number of arguments passed"
- echo "${help}"
- exit 1
-fi
-
-namespace=$1
-image=$2
-tag=${image##*:}
-
-echo "${namespace}" "${image}" "${tag}"
-
-kubectl create configmap -n "${namespace}" --from-file=testdata/catalogs/test-catalog-${tag}.Dockerfile operator-controller-e2e-${tag}.dockerfile
-kubectl create configmap -n "${namespace}" --from-file=testdata/catalogs/test-catalog-${tag} operator-controller-e2e-${tag}.build-contents
-
-kubectl apply -f - << EOF
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: "kaniko-${tag}"
- namespace: "${namespace}"
-spec:
- template:
- spec:
- containers:
- - name: kaniko-${tag}
- image: gcr.io/kaniko-project/executor:latest
- args: ["--dockerfile=/workspace/test-catalog-${tag}.Dockerfile",
- "--context=/workspace/",
- "--destination=${image}",
- "--skip-tls-verify"]
- volumeMounts:
- - name: dockerfile
- mountPath: /workspace/
- - name: build-contents
- mountPath: /workspace/test-catalog-${tag}/
- restartPolicy: Never
- volumes:
- - name: dockerfile
- configMap:
- name: operator-controller-e2e-${tag}.dockerfile
- items:
- - key: test-catalog-${tag}.Dockerfile
- path: test-catalog-${tag}.Dockerfile
- - name: build-contents
- configMap:
- name: operator-controller-e2e-${tag}.build-contents
-EOF
-
-kubectl wait --for=condition=Complete -n "${namespace}" jobs/kaniko-${tag} --timeout=60s
diff --git a/hack/test/pre-upgrade-setup.sh b/hack/test/pre-upgrade-setup.sh
index 00734f952..33dd035fb 100755
--- a/hack/test/pre-upgrade-setup.sh
+++ b/hack/test/pre-upgrade-setup.sh
@@ -20,7 +20,7 @@ TEST_CLUSTER_CATALOG_NAME=$2
TEST_CLUSTER_EXTENSION_NAME=$3
kubectl apply -f - << EOF
-apiVersion: olm.operatorframework.io/v1alpha1
+apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
name: ${TEST_CLUSTER_CATALOG_NAME}
@@ -29,7 +29,7 @@ spec:
type: Image
image:
ref: ${TEST_CATALOG_IMG}
- pollInterval: 24h
+ pollIntervalMinutes: 1440
EOF
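
For reference, the ClusterCatalog manifest created above now uses the renamed polling field. A minimal sketch follows, assuming the `source`/`image` nesting used by catalogd's ClusterCatalog; the catalog name and image ref are placeholders, and 1440 minutes is the same cadence as the old `pollInterval: 24h`.

``` yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
  name: operatorhubio                             # placeholder
spec:
  source:
    type: Image
    image:
      ref: quay.io/operatorhubio/catalog:latest   # placeholder
      pollIntervalMinutes: 1440                   # replaces pollInterval: 24h
```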
kubectl apply -f - <<EOF
...
diff --git a/internal/controllers/clusterextension_controller.go b/internal/controllers/clusterextension_controller.go
if len(deprecationMessages) > 0 {
- status, reason, message = metav1.ConditionTrue, ocv1alpha1.ReasonDeprecated, strings.Join(deprecationMessages, ";")
+ status, reason, message = metav1.ConditionTrue, ocv1.ReasonDeprecated, strings.Join(deprecationMessages, ";")
}
apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{
- Type: ocv1alpha1.TypeDeprecated,
+ Type: ocv1.TypeDeprecated,
Reason: reason,
Status: status,
Message: message,
@@ -383,14 +383,14 @@ func SetDeprecationStatus(ext *ocv1alpha1.ClusterExtension, bundleName string, d
// finally, set the individual deprecation conditions for package, channel, and bundle
for _, conditionType := range []string{
- ocv1alpha1.TypePackageDeprecated,
- ocv1alpha1.TypeChannelDeprecated,
- ocv1alpha1.TypeBundleDeprecated,
+ ocv1.TypePackageDeprecated,
+ ocv1.TypeChannelDeprecated,
+ ocv1.TypeBundleDeprecated,
} {
entries, ok := deprecations[conditionType]
- status, reason, message := metav1.ConditionFalse, ocv1alpha1.ReasonDeprecated, ""
+ status, reason, message := metav1.ConditionFalse, ocv1.ReasonDeprecated, ""
if ok {
- status, reason = metav1.ConditionTrue, ocv1alpha1.ReasonDeprecated
+ status, reason = metav1.ConditionTrue, ocv1.ReasonDeprecated
for _, entry := range entries {
message = fmt.Sprintf("%s\n%s", message, entry.Message)
}
@@ -408,7 +408,7 @@ func SetDeprecationStatus(ext *ocv1alpha1.ClusterExtension, bundleName string, d
// SetupWithManager sets up the controller with the Manager.
func (r *ClusterExtensionReconciler) SetupWithManager(mgr ctrl.Manager) error {
controller, err := ctrl.NewControllerManagedBy(mgr).
- For(&ocv1alpha1.ClusterExtension{}).
+ For(&ocv1.ClusterExtension{}).
Watches(&catalogd.ClusterCatalog{},
crhandler.EnqueueRequestsFromMapFunc(clusterExtensionRequestsForCatalog(mgr.GetClient(), mgr.GetLogger())),
builder.WithPredicates(predicate.Funcs{
@@ -438,7 +438,7 @@ func (r *ClusterExtensionReconciler) SetupWithManager(mgr ctrl.Manager) error {
return nil
}
-func wrapErrorWithResolutionInfo(resolved ocv1alpha1.BundleMetadata, err error) error {
+func wrapErrorWithResolutionInfo(resolved ocv1.BundleMetadata, err error) error {
return fmt.Errorf("%w for resolved bundle %q with version %q", err, resolved.Name, resolved.Version)
}
@@ -447,7 +447,7 @@ func clusterExtensionRequestsForCatalog(c client.Reader, logger logr.Logger) crh
return func(ctx context.Context, _ client.Object) []reconcile.Request {
// no way of associating an extension to a catalog so create reconcile requests for everything
clusterExtensions := metav1.PartialObjectMetadataList{}
- clusterExtensions.SetGroupVersionKind(ocv1alpha1.GroupVersion.WithKind("ClusterExtensionList"))
+ clusterExtensions.SetGroupVersionKind(ocv1.GroupVersion.WithKind("ClusterExtensionList"))
err := c.List(ctx, &clusterExtensions)
if err != nil {
logger.Error(err, "unable to enqueue cluster extensions for catalog reconcile")
@@ -471,11 +471,11 @@ type DefaultInstalledBundleGetter struct {
}
type InstalledBundle struct {
- ocv1alpha1.BundleMetadata
+ ocv1.BundleMetadata
Image string
}
-func (d *DefaultInstalledBundleGetter) GetInstalledBundle(ctx context.Context, ext *ocv1alpha1.ClusterExtension) (*InstalledBundle, error) {
+func (d *DefaultInstalledBundleGetter) GetInstalledBundle(ctx context.Context, ext *ocv1.ClusterExtension) (*InstalledBundle, error) {
cl, err := d.ActionClientFor(ctx, ext)
if err != nil {
return nil, err
@@ -494,7 +494,7 @@ func (d *DefaultInstalledBundleGetter) GetInstalledBundle(ctx context.Context, e
for _, rel := range relhis {
if rel.Info != nil && rel.Info.Status == release.StatusDeployed {
return &InstalledBundle{
- BundleMetadata: ocv1alpha1.BundleMetadata{
+ BundleMetadata: ocv1.BundleMetadata{
Name: rel.Labels[labels.BundleNameKey],
Version: rel.Labels[labels.BundleVersionKey],
},
diff --git a/internal/controllers/clusterextension_controller_test.go b/internal/controllers/clusterextension_controller_test.go
index c17390a57..ab4dc5e18 100644
--- a/internal/controllers/clusterextension_controller_test.go
+++ b/internal/controllers/clusterextension_controller_test.go
@@ -27,7 +27,7 @@ import (
helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client"
"github.com/operator-framework/operator-registry/alpha/declcfg"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
"github.com/operator-framework/operator-controller/internal/conditionsets"
"github.com/operator-framework/operator-controller/internal/controllers"
"github.com/operator-framework/operator-controller/internal/finalizers"
@@ -50,7 +50,7 @@ func TestClusterExtensionDoesNotExist(t *testing.T) {
func TestClusterExtensionResolutionFails(t *testing.T) {
pkgName := fmt.Sprintf("non-existent-%s", rand.String(6))
cl, reconciler := newClientAndReconciler(t)
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
return nil, nil, nil, fmt.Errorf("no package %q found", pkgName)
})
ctx := context.Background()
@@ -58,20 +58,18 @@ func TestClusterExtensionResolutionFails(t *testing.T) {
t.Log("When the cluster extension specifies a non-existent package")
t.Log("By initializing cluster state")
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: "default",
- },
+ Namespace: "default",
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: "default",
},
},
}
@@ -90,14 +88,14 @@ func TestClusterExtensionResolutionFails(t *testing.T) {
require.Empty(t, clusterExtension.Status.Install)
t.Log("By checking the expected conditions")
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
- require.Equal(t, ocv1alpha1.ReasonRetrying, cond.Reason)
+ require.Equal(t, ocv1.ReasonRetrying, cond.Reason)
require.Equal(t, fmt.Sprintf("no package %q found", pkgName), cond.Message)
verifyInvariants(ctx, t, reconciler.Client, clusterExtension)
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) {
@@ -134,22 +132,20 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) {
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -158,7 +154,7 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) {
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -178,23 +174,23 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) {
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
t.Log("By checking the status fields")
- expectedBundleMetadata := ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
+ expectedBundleMetadata := ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
require.Empty(t, clusterExtension.Status.Install)
t.Log("By checking the expected conditions")
expectStatus := metav1.ConditionTrue
- expectReason := ocv1alpha1.ReasonRetrying
+ expectReason := ocv1.ReasonRetrying
if tc.expectTerminal {
expectStatus = metav1.ConditionFalse
- expectReason = ocv1alpha1.ReasonBlocked
+ expectReason = ocv1.ReasonBlocked
}
- progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond)
require.Equal(t, expectStatus, progressingCond.Status)
require.Equal(t, expectReason, progressingCond.Reason)
require.Contains(t, progressingCond.Message, fmt.Sprintf("for resolved bundle %q with version %q", expectedBundleMetadata.Name, expectedBundleMetadata.Version))
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
})
}
}
@@ -218,22 +214,20 @@ func TestClusterExtensionUnpackUnexpectedState(t *testing.T) {
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -242,7 +236,7 @@ func TestClusterExtensionUnpackUnexpectedState(t *testing.T) {
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -255,7 +249,7 @@ func TestClusterExtensionUnpackUnexpectedState(t *testing.T) {
_, _ = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey})
}, "reconciliation should panic on unknown unpack state")
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T) {
@@ -278,22 +272,20 @@ func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T)
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -302,7 +294,7 @@ func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T)
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -321,23 +313,23 @@ func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T)
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
t.Log("By checking the status fields")
- expectedBundleMetadata := ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
+ expectedBundleMetadata := ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
require.Empty(t, clusterExtension.Status.Install)
t.Log("By checking the expected installed conditions")
- installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
require.NotNil(t, installedCond)
require.Equal(t, metav1.ConditionFalse, installedCond.Status)
- require.Equal(t, ocv1alpha1.ReasonFailed, installedCond.Reason)
+ require.Equal(t, ocv1.ReasonFailed, installedCond.Reason)
t.Log("By checking the expected progressing conditions")
- progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond)
require.Equal(t, metav1.ConditionTrue, progressingCond.Status)
- require.Equal(t, ocv1alpha1.ReasonRetrying, progressingCond.Reason)
+ require.Equal(t, ocv1.ReasonRetrying, progressingCond.Reason)
require.Contains(t, progressingCond.Message, fmt.Sprintf("for resolved bundle %q with version %q", expectedBundleMetadata.Name, expectedBundleMetadata.Version))
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) {
@@ -360,22 +352,20 @@ func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) {
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -384,7 +374,7 @@ func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) {
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -398,7 +388,7 @@ func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) {
}
reconciler.InstalledBundleGetter = &MockInstalledBundleGetter{
bundle: &controllers.InstalledBundle{
- BundleMetadata: ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"},
+ BundleMetadata: ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"},
Image: "quay.io/operatorhubio/prometheus@fake1.0.0",
},
}
@@ -422,23 +412,23 @@ func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) {
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
t.Log("By checking the status fields")
- expectedBundleMetadata := ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
+ expectedBundleMetadata := ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
require.Equal(t, expectedBundleMetadata, clusterExtension.Status.Install.Bundle)
t.Log("By checking the expected installed conditions")
- installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
require.NotNil(t, installedCond)
require.Equal(t, metav1.ConditionTrue, installedCond.Status)
- require.Equal(t, ocv1alpha1.ReasonSucceeded, installedCond.Reason)
+ require.Equal(t, ocv1.ReasonSucceeded, installedCond.Reason)
t.Log("By checking the expected progressing conditions")
- progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond)
require.Equal(t, metav1.ConditionTrue, progressingCond.Status)
- require.Equal(t, ocv1alpha1.ReasonRetrying, progressingCond.Reason)
+ require.Equal(t, ocv1.ReasonRetrying, progressingCond.Reason)
require.Contains(t, progressingCond.Message, fmt.Sprintf("for resolved bundle %q with version %q", expectedBundleMetadata.Name, expectedBundleMetadata.Version))
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionManagerFailed(t *testing.T) {
@@ -461,22 +451,20 @@ func TestClusterExtensionManagerFailed(t *testing.T) {
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -485,7 +473,7 @@ func TestClusterExtensionManagerFailed(t *testing.T) {
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -507,21 +495,21 @@ func TestClusterExtensionManagerFailed(t *testing.T) {
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
t.Log("By checking the status fields")
- require.Equal(t, ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}, clusterExtension.Status.Install.Bundle)
+ require.Equal(t, ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}, clusterExtension.Status.Install.Bundle)
t.Log("By checking the expected installed conditions")
- installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
require.NotNil(t, installedCond)
require.Equal(t, metav1.ConditionTrue, installedCond.Status)
- require.Equal(t, ocv1alpha1.ReasonSucceeded, installedCond.Reason)
+ require.Equal(t, ocv1.ReasonSucceeded, installedCond.Reason)
t.Log("By checking the expected progressing conditions")
- progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond)
require.Equal(t, metav1.ConditionTrue, progressingCond.Status)
- require.Equal(t, ocv1alpha1.ReasonRetrying, progressingCond.Reason)
+ require.Equal(t, ocv1.ReasonRetrying, progressingCond.Reason)
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) {
@@ -544,23 +532,21 @@ func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) {
installNamespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
- SourceType: ocv1alpha1.SourceTypeCatalog,
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
+ SourceType: ocv1.SourceTypeCatalog,
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: installNamespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: installNamespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -569,7 +555,7 @@ func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) {
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -593,21 +579,21 @@ func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) {
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
t.Log("By checking the status fields")
- require.Equal(t, ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}, clusterExtension.Status.Install.Bundle)
+ require.Equal(t, ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}, clusterExtension.Status.Install.Bundle)
t.Log("By checking the expected installed conditions")
- installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
require.NotNil(t, installedCond)
require.Equal(t, metav1.ConditionTrue, installedCond.Status)
- require.Equal(t, ocv1alpha1.ReasonSucceeded, installedCond.Reason)
+ require.Equal(t, ocv1.ReasonSucceeded, installedCond.Reason)
t.Log("By checking the expected progressing conditions")
- progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond)
require.Equal(t, metav1.ConditionTrue, progressingCond.Status)
- require.Equal(t, ocv1alpha1.ReasonRetrying, progressingCond.Reason)
+ require.Equal(t, ocv1.ReasonRetrying, progressingCond.Reason)
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionInstallationSucceeds(t *testing.T) {
@@ -630,22 +616,20 @@ func TestClusterExtensionInstallationSucceeds(t *testing.T) {
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -654,7 +638,7 @@ func TestClusterExtensionInstallationSucceeds(t *testing.T) {
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -676,21 +660,21 @@ func TestClusterExtensionInstallationSucceeds(t *testing.T) {
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
t.Log("By checking the status fields")
- require.Equal(t, ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}, clusterExtension.Status.Install.Bundle)
+ require.Equal(t, ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}, clusterExtension.Status.Install.Bundle)
t.Log("By checking the expected installed conditions")
- installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
require.NotNil(t, installedCond)
require.Equal(t, metav1.ConditionTrue, installedCond.Status)
- require.Equal(t, ocv1alpha1.ReasonSucceeded, installedCond.Reason)
+ require.Equal(t, ocv1.ReasonSucceeded, installedCond.Reason)
t.Log("By checking the expected progressing conditions")
- progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond)
- require.Equal(t, metav1.ConditionFalse, progressingCond.Status)
- require.Equal(t, ocv1alpha1.ReasonSucceeded, progressingCond.Reason)
+ require.Equal(t, metav1.ConditionTrue, progressingCond.Status)
+ require.Equal(t, ocv1.ReasonSucceeded, progressingCond.Reason)
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
}
func TestClusterExtensionDeleteFinalizerFails(t *testing.T) {
@@ -713,22 +697,20 @@ func TestClusterExtensionDeleteFinalizerFails(t *testing.T) {
namespace := fmt.Sprintf("test-ns-%s", rand.String(8))
serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{Name: extKey.Name},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkgName,
Version: pkgVer,
Channels: []string{pkgChan},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: namespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: serviceAccount,
- },
+ Namespace: namespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: serviceAccount,
},
},
}
@@ -736,7 +718,7 @@ func TestClusterExtensionDeleteFinalizerFails(t *testing.T) {
require.NoError(t, err)
t.Log("It sets resolution success status")
t.Log("By running reconcile")
- reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1alpha1.ClusterExtension, _ *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+ reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
v := bsemver.MustParse("1.0.0")
return &declcfg.Bundle{
Name: "prometheus.v1.0.0",
@@ -754,7 +736,7 @@ func TestClusterExtensionDeleteFinalizerFails(t *testing.T) {
}
reconciler.InstalledBundleGetter = &MockInstalledBundleGetter{
bundle: &controllers.InstalledBundle{
- BundleMetadata: ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"},
+ BundleMetadata: ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"},
Image: "quay.io/operatorhubio/prometheus@fake1.0.0",
},
}
@@ -774,37 +756,37 @@ func TestClusterExtensionDeleteFinalizerFails(t *testing.T) {
t.Log("By fetching updated cluster extension after first reconcile")
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
- expectedBundleMetadata := ocv1alpha1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
+ expectedBundleMetadata := ocv1.BundleMetadata{Name: "prometheus.v1.0.0", Version: "1.0.0"}
require.Equal(t, expectedBundleMetadata, clusterExtension.Status.Install.Bundle)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
- require.NoError(t, cl.DeleteAllOf(ctx, &ocv1alpha1.ClusterExtension{}))
+ require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{}))
res, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey})
require.Error(t, err, res)
t.Log("By fetching updated cluster extension after second reconcile")
require.NoError(t, cl.Get(ctx, extKey, clusterExtension))
- cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
require.Equal(t, expectedBundleMetadata, clusterExtension.Status.Install.Bundle)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
require.Equal(t, fakeFinalizer, clusterExtension.Finalizers[0])
- cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
require.Contains(t, cond.Message, finalizersMessage)
}
-func verifyInvariants(ctx context.Context, t *testing.T, c client.Client, ext *ocv1alpha1.ClusterExtension) {
+func verifyInvariants(ctx context.Context, t *testing.T, c client.Client, ext *ocv1.ClusterExtension) {
key := client.ObjectKeyFromObject(ext)
require.NoError(t, c.Get(ctx, key, ext))
verifyConditionsInvariants(t, ext)
}
-func verifyConditionsInvariants(t *testing.T, ext *ocv1alpha1.ClusterExtension) {
+func verifyConditionsInvariants(t *testing.T, ext *ocv1.ClusterExtension) {
// Expect that the cluster extension's set of conditions contains all defined
// condition types for the ClusterExtension API. Every reconcile should always
// ensure every condition type's status/reason/message reflects the state
@@ -822,48 +804,48 @@ func verifyConditionsInvariants(t *testing.T, ext *ocv1alpha1.ClusterExtension)
func TestSetDeprecationStatus(t *testing.T) {
for _, tc := range []struct {
name string
- clusterExtension *ocv1alpha1.ClusterExtension
- expectedClusterExtension *ocv1alpha1.ClusterExtension
+ clusterExtension *ocv1.ClusterExtension
+ expectedClusterExtension *ocv1.ClusterExtension
bundle *declcfg.Bundle
deprecation *declcfg.Deprecation
}{
{
name: "no deprecations, all deprecation statuses set to False",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
@@ -875,53 +857,53 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated channel, but no channel specified, all deprecation statuses set to False",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{},
+ Catalog: &ocv1.CatalogSource{},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{},
+ Catalog: &ocv1.CatalogSource{},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
@@ -940,57 +922,57 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated channel, but a non-deprecated channel specified, all deprecation statuses set to False",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"nondeprecated"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"nondeprecated"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
@@ -1011,57 +993,57 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated channel specified, ChannelDeprecated and Deprecated status set to true, others set to false",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
@@ -1083,57 +1065,57 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated package and channel specified, deprecated bundle, all deprecation statuses set to true",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
@@ -1168,57 +1150,57 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated channel specified, deprecated bundle, all deprecation statuses set to true, all deprecation statuses set to true except PackageDeprecated",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
@@ -1247,57 +1229,57 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated package and channel specified, all deprecation statuses set to true except BundleDeprecated",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
@@ -1325,57 +1307,57 @@ func TestSetDeprecationStatus(t *testing.T) {
},
{
name: "deprecated channels specified, ChannelDeprecated and Deprecated status set to true, others set to false",
- clusterExtension: &ocv1alpha1.ClusterExtension{
+ clusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel", "anotherbadchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{},
},
},
- expectedClusterExtension: &ocv1alpha1.ClusterExtension{
+ expectedClusterExtension: &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Generation: 1,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
Channels: []string{"badchannel", "anotherbadchannel"},
},
},
},
- Status: ocv1alpha1.ClusterExtensionStatus{
+ Status: ocv1.ClusterExtensionStatus{
Conditions: []metav1.Condition{
{
- Type: ocv1alpha1.TypeDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypePackageDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypePackageDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeChannelDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeChannelDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionTrue,
ObservedGeneration: 1,
},
{
- Type: ocv1alpha1.TypeBundleDeprecated,
- Reason: ocv1alpha1.ReasonDeprecated,
+ Type: ocv1.TypeBundleDeprecated,
+ Reason: ocv1.ReasonDeprecated,
Status: metav1.ConditionFalse,
ObservedGeneration: 1,
},
@@ -1452,7 +1434,7 @@ func (mag *MockActionGetter) Reconcile(rel *release.Release) error {
func TestGetInstalledBundleHistory(t *testing.T) {
getter := controllers.DefaultInstalledBundleGetter{}
- ext := ocv1alpha1.ClusterExtension{
+ ext := ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ext",
},
@@ -1488,9 +1470,10 @@ func TestGetInstalledBundleHistory(t *testing.T) {
labels.BundleReferenceKey: "bundle-ref",
},
},
- }, nil,
+ },
+ nil,
&controllers.InstalledBundle{
- BundleMetadata: ocv1alpha1.BundleMetadata{
+ BundleMetadata: ocv1.BundleMetadata{
Name: "test-ext",
Version: "1.0",
},
@@ -1522,9 +1505,10 @@ func TestGetInstalledBundleHistory(t *testing.T) {
labels.BundleReferenceKey: "bundle-ref-1",
},
},
- }, nil,
+ },
+ nil,
&controllers.InstalledBundle{
- BundleMetadata: ocv1alpha1.BundleMetadata{
+ BundleMetadata: ocv1.BundleMetadata{
Name: "test-ext",
Version: "1.0",
},
diff --git a/internal/controllers/common_controller.go b/internal/controllers/common_controller.go
index cefe53913..7cee10c10 100644
--- a/internal/controllers/common_controller.go
+++ b/internal/controllers/common_controller.go
@@ -24,11 +24,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
// setInstalledStatusFromBundle sets the installed status based on the given installedBundle.
-func setInstalledStatusFromBundle(ext *ocv1alpha1.ClusterExtension, installedBundle *InstalledBundle) {
+func setInstalledStatusFromBundle(ext *ocv1.ClusterExtension, installedBundle *InstalledBundle) {
// Nothing is installed
if installedBundle == nil {
setInstallStatus(ext, nil)
@@ -36,7 +36,7 @@ func setInstalledStatusFromBundle(ext *ocv1alpha1.ClusterExtension, installedBun
return
}
// Something is installed
- installStatus := &ocv1alpha1.ClusterExtensionInstallStatus{
+ installStatus := &ocv1.ClusterExtensionInstallStatus{
Bundle: installedBundle.BundleMetadata,
}
setInstallStatus(ext, installStatus)
@@ -44,60 +44,59 @@ func setInstalledStatusFromBundle(ext *ocv1alpha1.ClusterExtension, installedBun
}
// setInstalledStatusConditionSuccess sets the installed status condition to success.
-func setInstalledStatusConditionSuccess(ext *ocv1alpha1.ClusterExtension, message string) {
+func setInstalledStatusConditionSuccess(ext *ocv1.ClusterExtension, message string) {
apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{
- Type: ocv1alpha1.TypeInstalled,
+ Type: ocv1.TypeInstalled,
Status: metav1.ConditionTrue,
- Reason: ocv1alpha1.ReasonSucceeded,
+ Reason: ocv1.ReasonSucceeded,
Message: message,
ObservedGeneration: ext.GetGeneration(),
})
}
// setInstalledStatusConditionFailed sets the installed status condition to failed.
-func setInstalledStatusConditionFailed(ext *ocv1alpha1.ClusterExtension, message string) {
+func setInstalledStatusConditionFailed(ext *ocv1.ClusterExtension, message string) {
apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{
- Type: ocv1alpha1.TypeInstalled,
+ Type: ocv1.TypeInstalled,
Status: metav1.ConditionFalse,
- Reason: ocv1alpha1.ReasonFailed,
+ Reason: ocv1.ReasonFailed,
Message: message,
ObservedGeneration: ext.GetGeneration(),
})
}
// setInstalledStatusConditionUnknown sets the installed status condition to unknown.
-func setInstalledStatusConditionUnknown(ext *ocv1alpha1.ClusterExtension, message string) {
+func setInstalledStatusConditionUnknown(ext *ocv1.ClusterExtension, message string) {
apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{
- Type: ocv1alpha1.TypeInstalled,
+ Type: ocv1.TypeInstalled,
Status: metav1.ConditionUnknown,
- Reason: ocv1alpha1.ReasonFailed,
+ Reason: ocv1.ReasonFailed,
Message: message,
ObservedGeneration: ext.GetGeneration(),
})
}
-func setInstallStatus(ext *ocv1alpha1.ClusterExtension, installStatus *ocv1alpha1.ClusterExtensionInstallStatus) {
+func setInstallStatus(ext *ocv1.ClusterExtension, installStatus *ocv1.ClusterExtensionInstallStatus) {
ext.Status.Install = installStatus
}
-func setStatusProgressing(ext *ocv1alpha1.ClusterExtension, err error) {
+func setStatusProgressing(ext *ocv1.ClusterExtension, err error) {
progressingCond := metav1.Condition{
- Type: ocv1alpha1.TypeProgressing,
- Status: metav1.ConditionFalse,
- Reason: ocv1alpha1.ReasonSucceeded,
+ Type: ocv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: ocv1.ReasonSucceeded,
Message: "desired state reached",
ObservedGeneration: ext.GetGeneration(),
}
if err != nil {
- progressingCond.Status = metav1.ConditionTrue
- progressingCond.Reason = ocv1alpha1.ReasonRetrying
+ progressingCond.Reason = ocv1.ReasonRetrying
progressingCond.Message = err.Error()
}
if errors.Is(err, reconcile.TerminalError(nil)) {
progressingCond.Status = metav1.ConditionFalse
- progressingCond.Reason = ocv1alpha1.ReasonBlocked
+ progressingCond.Reason = ocv1.ReasonBlocked
}
apimeta.SetStatusCondition(&ext.Status.Conditions, progressingCond)
diff --git a/internal/controllers/common_controller_test.go b/internal/controllers/common_controller_test.go
index 8f703fc6e..7b644172d 100644
--- a/internal/controllers/common_controller_test.go
+++ b/internal/controllers/common_controller_test.go
@@ -11,53 +11,53 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
func TestSetStatusProgressing(t *testing.T) {
for _, tc := range []struct {
name string
err error
- clusterExtension *ocv1alpha1.ClusterExtension
+ clusterExtension *ocv1.ClusterExtension
expected metav1.Condition
}{
{
- name: "non-nil ClusterExtension, nil error, Progressing condition has status False with reason Success",
+ name: "non-nil ClusterExtension, nil error, Progressing condition has status True with reason Success",
err: nil,
- clusterExtension: &ocv1alpha1.ClusterExtension{},
+ clusterExtension: &ocv1.ClusterExtension{},
expected: metav1.Condition{
- Type: ocv1alpha1.TypeProgressing,
- Status: metav1.ConditionFalse,
- Reason: ocv1alpha1.ReasonSucceeded,
+ Type: ocv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: ocv1.ReasonSucceeded,
Message: "desired state reached",
},
},
{
name: "non-nil ClusterExtension, non-terminal error, Progressing condition has status True with reason Retrying",
err: errors.New("boom"),
- clusterExtension: &ocv1alpha1.ClusterExtension{},
+ clusterExtension: &ocv1.ClusterExtension{},
expected: metav1.Condition{
- Type: ocv1alpha1.TypeProgressing,
+ Type: ocv1.TypeProgressing,
Status: metav1.ConditionTrue,
- Reason: ocv1alpha1.ReasonRetrying,
+ Reason: ocv1.ReasonRetrying,
Message: "boom",
},
},
{
name: "non-nil ClusterExtension, terminal error, Progressing condition has status False with reason Blocked",
err: reconcile.TerminalError(errors.New("boom")),
- clusterExtension: &ocv1alpha1.ClusterExtension{},
+ clusterExtension: &ocv1.ClusterExtension{},
expected: metav1.Condition{
- Type: ocv1alpha1.TypeProgressing,
+ Type: ocv1.TypeProgressing,
Status: metav1.ConditionFalse,
- Reason: ocv1alpha1.ReasonBlocked,
+ Reason: ocv1.ReasonBlocked,
Message: "terminal error: boom",
},
},
} {
t.Run(tc.name, func(t *testing.T) {
setStatusProgressing(tc.clusterExtension, tc.err)
- progressingCond := meta.FindStatusCondition(tc.clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ progressingCond := meta.FindStatusCondition(tc.clusterExtension.Status.Conditions, ocv1.TypeProgressing)
require.NotNil(t, progressingCond, "progressing condition should be set but was not")
diff := cmp.Diff(*progressingCond, tc.expected, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration"))
require.Empty(t, diff, "difference between actual and expected Progressing conditions")
diff --git a/internal/controllers/suite_test.go b/internal/controllers/suite_test.go
index 97ea3c427..52fd8900a 100644
--- a/internal/controllers/suite_test.go
+++ b/internal/controllers/suite_test.go
@@ -35,7 +35,7 @@ import (
helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
"github.com/operator-framework/operator-controller/internal/contentmanager"
cmcache "github.com/operator-framework/operator-controller/internal/contentmanager/cache"
"github.com/operator-framework/operator-controller/internal/controllers"
@@ -65,7 +65,7 @@ func newClient(t *testing.T) client.Client {
// TODO: this is a live client, which behaves differently than a cache client.
// We may want to use a caching client instead to get closer to real behavior.
sch := runtime.NewScheme()
- require.NoError(t, ocv1alpha1.AddToScheme(sch))
+ require.NoError(t, ocv1.AddToScheme(sch))
cl, err := client.New(config, client.Options{Scheme: sch})
require.NoError(t, err)
require.NotNil(t, cl)
@@ -80,7 +80,7 @@ func (m *MockInstalledBundleGetter) SetBundle(bundle *controllers.InstalledBundl
m.bundle = bundle
}
-func (m *MockInstalledBundleGetter) GetInstalledBundle(ctx context.Context, ext *ocv1alpha1.ClusterExtension) (*controllers.InstalledBundle, error) {
+func (m *MockInstalledBundleGetter) GetInstalledBundle(ctx context.Context, ext *ocv1.ClusterExtension) (*controllers.InstalledBundle, error) {
return m.bundle, nil
}
@@ -92,7 +92,7 @@ type MockApplier struct {
state string
}
-func (m *MockApplier) Apply(_ context.Context, _ fs.FS, _ *ocv1alpha1.ClusterExtension, _ map[string]string, _ map[string]string) ([]client.Object, string, error) {
+func (m *MockApplier) Apply(_ context.Context, _ fs.FS, _ *ocv1.ClusterExtension, _ map[string]string, _ map[string]string) ([]client.Object, string, error) {
if m.err != nil {
return nil, m.state, m.err
}
@@ -107,14 +107,14 @@ type MockManagedContentCacheManager struct {
cache cmcache.Cache
}
-func (m *MockManagedContentCacheManager) Get(_ context.Context, _ *ocv1alpha1.ClusterExtension) (cmcache.Cache, error) {
+func (m *MockManagedContentCacheManager) Get(_ context.Context, _ *ocv1.ClusterExtension) (cmcache.Cache, error) {
if m.err != nil {
return nil, m.err
}
return m.cache, nil
}
-func (m *MockManagedContentCacheManager) Delete(_ *ocv1alpha1.ClusterExtension) error {
+func (m *MockManagedContentCacheManager) Delete(_ *ocv1.ClusterExtension) error {
return m.err
}
diff --git a/internal/resolve/catalog.go b/internal/resolve/catalog.go
index de210fe3c..944744c5f 100644
--- a/internal/resolve/catalog.go
+++ b/internal/resolve/catalog.go
@@ -15,10 +15,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-registry/alpha/declcfg"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
"github.com/operator-framework/operator-controller/internal/bundleutil"
"github.com/operator-framework/operator-controller/internal/catalogmetadata/compare"
"github.com/operator-framework/operator-controller/internal/catalogmetadata/filter"
@@ -38,18 +38,23 @@ type foundBundle struct {
}
// Resolve returns a Bundle from a catalog that needs to get installed on the cluster.
-func (r *CatalogResolver) Resolve(ctx context.Context, ext *ocv1alpha1.ClusterExtension, installedBundle *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+func (r *CatalogResolver) Resolve(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
packageName := ext.Spec.Source.Catalog.PackageName
versionRange := ext.Spec.Source.Catalog.Version
channels := ext.Spec.Source.Catalog.Channels
- selector, err := metav1.LabelSelectorAsSelector(&ext.Spec.Source.Catalog.Selector)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("desired catalog selector is invalid: %w", err)
- }
- // A nothing (empty) seletor selects everything
- if selector == labels.Nothing() {
- selector = labels.Everything()
+ // unless overridden, default to selecting all bundles
+ var selector = labels.Everything()
+ var err error
+ if ext.Spec.Source.Catalog != nil {
+ selector, err = metav1.LabelSelectorAsSelector(ext.Spec.Source.Catalog.Selector)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("desired catalog selector is invalid: %w", err)
+ }
+ // A nothing (empty) selector selects everything
+ if selector == labels.Nothing() {
+ selector = labels.Everything()
+ }
}
var versionRangeConstraints *mmsemver.Constraints
@@ -84,7 +89,7 @@ func (r *CatalogResolver) Resolve(ctx context.Context, ext *ocv1alpha1.ClusterEx
predicates = append(predicates, filter.InMastermindsSemverRange(versionRangeConstraints))
}
- if ext.Spec.Source.Catalog.UpgradeConstraintPolicy != ocv1alpha1.UpgradeConstraintPolicySelfCertified && installedBundle != nil {
+ if ext.Spec.Source.Catalog.UpgradeConstraintPolicy != ocv1.UpgradeConstraintPolicySelfCertified && installedBundle != nil {
successorPredicate, err := filter.SuccessorsOf(*installedBundle, packageFBC.Channels...)
if err != nil {
return fmt.Errorf("error finding upgrade edges: %w", err)
@@ -182,7 +187,7 @@ type resolutionError struct {
PackageName string
Version string
Channels []string
- InstalledBundle *ocv1alpha1.BundleMetadata
+ InstalledBundle *ocv1.BundleMetadata
ResolvedBundles []foundBundle
}
@@ -245,7 +250,7 @@ func CatalogWalker(
// Remove disabled catalogs from consideration
catalogs = slices.DeleteFunc(catalogs, func(c catalogd.ClusterCatalog) bool {
- if c.Spec.Availability == "Disabled" {
+ if c.Spec.AvailabilityMode == catalogd.AvailabilityModeUnavailable {
l.Info("excluding ClusterCatalog from resolution process since it is disabled", "catalog", c.Name)
return true
}
diff --git a/internal/resolve/catalog_test.go b/internal/resolve/catalog_test.go
index 4856efc7e..83eeba9b0 100644
--- a/internal/resolve/catalog_test.go
+++ b/internal/resolve/catalog_test.go
@@ -16,18 +16,18 @@ import (
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-registry/alpha/declcfg"
"github.com/operator-framework/operator-registry/alpha/property"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
"github.com/operator-framework/operator-controller/internal/features"
)
func TestInvalidClusterExtensionVersionRange(t *testing.T) {
r := CatalogResolver{}
pkgName := randPkg()
- ce := buildFooClusterExtension(pkgName, []string{}, "foobar", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "foobar", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, `desired version range "foobar" is invalid: improper constraint: foobar`)
}
@@ -37,7 +37,7 @@ func TestErrorWalkingCatalogs(t *testing.T) {
return fmt.Errorf("fake error")
}}
pkgName := randPkg()
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, "error walking catalogs: fake error")
}
@@ -50,7 +50,7 @@ func TestErrorGettingPackage(t *testing.T) {
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
pkgName := randPkg()
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, fmt.Sprintf(`error walking catalogs: error getting package %q from catalog "a": fake error`, pkgName))
}
@@ -69,7 +69,7 @@ func TestPackageDoesNotExist(t *testing.T) {
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
pkgName := randPkg()
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, fmt.Sprintf(`no bundles found for package %q`, pkgName))
}
@@ -88,7 +88,7 @@ func TestPackageExists(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "3.0.0"), *gotBundle)
@@ -117,7 +117,7 @@ func TestValidationFailed(t *testing.T) {
},
},
}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
require.Error(t, err)
}
@@ -136,7 +136,7 @@ func TestVersionDoesNotExist(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "4.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "4.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, fmt.Sprintf(`no bundles found for package %q matching version "4.0.0"`, pkgName))
}
@@ -155,7 +155,7 @@ func TestVersionExists(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <2.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <2.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "1.0.2"), *gotBundle)
@@ -177,7 +177,7 @@ func TestChannelDoesNotExist(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{"stable"}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{"stable"}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, fmt.Sprintf(`no bundles found for package %q in channels [stable]`, pkgName))
}
@@ -196,7 +196,7 @@ func TestChannelExists(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{"beta"}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{"beta"}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "1.0.2"), *gotBundle)
@@ -218,7 +218,7 @@ func TestChannelExistsButNotVersion(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{"beta"}, "3.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{"beta"}, "3.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, fmt.Sprintf(`no bundles found for package %q matching version "3.0.0" in channels [beta]`, pkgName))
}
@@ -237,7 +237,7 @@ func TestVersionExistsButNotChannel(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{"stable"}, "1.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{"stable"}, "1.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
assert.EqualError(t, err, fmt.Sprintf(`no bundles found for package %q matching version "1.0.0" in channels [stable]`, pkgName))
}
@@ -256,7 +256,7 @@ func TestChannelAndVersionExist(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{"alpha"}, "0.1.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{"alpha"}, "0.1.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "0.1.0"), *gotBundle)
@@ -278,7 +278,7 @@ func TestPreferNonDeprecated(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, ">=0.1.0 <=1.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=0.1.0 <=1.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "0.1.0"), *gotBundle)
@@ -300,7 +300,7 @@ func TestAcceptDeprecated(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.1", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.1", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "1.0.1"), *gotBundle)
@@ -383,7 +383,7 @@ func TestPackageVariationsBetweenCatalogs(t *testing.T) {
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
t.Run("when bundle candidates for a package are deprecated in all but one catalog", func(t *testing.T) {
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.3", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.3", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
// We choose the only non-deprecated package
@@ -393,7 +393,7 @@ func TestPackageVariationsBetweenCatalogs(t *testing.T) {
})
t.Run("when bundle candidates are found and deprecated in multiple catalogs", func(t *testing.T) {
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.1", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.1", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.Error(t, err)
// We will not make a decision on which catalog to use
@@ -404,7 +404,7 @@ func TestPackageVariationsBetweenCatalogs(t *testing.T) {
})
t.Run("when bundle candidates are found and not deprecated in multiple catalogs", func(t *testing.T) {
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.4", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.4", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.Error(t, err)
// We will not make a decision on which catalog to use
@@ -415,7 +415,7 @@ func TestPackageVariationsBetweenCatalogs(t *testing.T) {
})
t.Run("highest semver bundle is chosen when candidates are all from the same catalog", func(t *testing.T) {
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.4 <=1.0.5", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.4 <=1.0.5", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
// Bundles within one catalog for a package will be sorted by semver and deprecation and the best is returned
@@ -440,8 +440,8 @@ func TestUpgradeFoundLegacy(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
- installedBundle := &ocv1alpha1.BundleMetadata{
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
+ installedBundle := &ocv1.BundleMetadata{
Name: bundleName(pkgName, "0.1.0"),
Version: "0.1.0",
}
@@ -468,8 +468,8 @@ func TestUpgradeNotFoundLegacy(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "<1.0.0 >=2.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
- installedBundle := &ocv1alpha1.BundleMetadata{
+ ce := buildFooClusterExtension(pkgName, []string{}, "<1.0.0 >=2.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
+ installedBundle := &ocv1.BundleMetadata{
Name: bundleName(pkgName, "0.1.0"),
Version: "0.1.0",
}
@@ -493,8 +493,8 @@ func TestUpgradeFoundSemver(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
- installedBundle := &ocv1alpha1.BundleMetadata{
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
+ installedBundle := &ocv1.BundleMetadata{
Name: bundleName(pkgName, "1.0.0"),
Version: "1.0.0",
}
@@ -523,8 +523,8 @@ func TestUpgradeNotFoundSemver(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "!=0.1.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
- installedBundle := &ocv1alpha1.BundleMetadata{
+ ce := buildFooClusterExtension(pkgName, []string{}, "!=0.1.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
+ installedBundle := &ocv1.BundleMetadata{
Name: bundleName(pkgName, "0.1.0"),
Version: "0.1.0",
}
@@ -548,8 +548,8 @@ func TestDowngradeFound(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "<1.0.2", ocv1alpha1.UpgradeConstraintPolicySelfCertified)
- installedBundle := &ocv1alpha1.BundleMetadata{
+ ce := buildFooClusterExtension(pkgName, []string{}, "<1.0.2", ocv1.UpgradeConstraintPolicySelfCertified)
+ installedBundle := &ocv1.BundleMetadata{
Name: bundleName(pkgName, "1.0.2"),
Version: "1.0.2",
}
@@ -576,8 +576,8 @@ func TestDowngradeNotFound(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, ">0.1.0 <1.0.0", ocv1alpha1.UpgradeConstraintPolicySelfCertified)
- installedBundle := &ocv1alpha1.BundleMetadata{
+ ce := buildFooClusterExtension(pkgName, []string{}, ">0.1.0 <1.0.0", ocv1.UpgradeConstraintPolicySelfCertified)
+ installedBundle := &ocv1.BundleMetadata{
Name: bundleName(pkgName, "1.0.2"),
Version: "1.0.2",
}
@@ -640,19 +640,17 @@ func TestCatalogWalker(t *testing.T) {
})
}
-func buildFooClusterExtension(pkg string, channels []string, version string, upgradeConstraintPolicy ocv1alpha1.UpgradeConstraintPolicy) *ocv1alpha1.ClusterExtension {
- return &ocv1alpha1.ClusterExtension{
+func buildFooClusterExtension(pkg string, channels []string, version string, upgradeConstraintPolicy ocv1.UpgradeConstraintPolicy) *ocv1.ClusterExtension {
+ return &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: pkg,
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{Name: "default"},
- },
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Namespace: "default",
+ ServiceAccount: ocv1.ServiceAccountReference{Name: "default"},
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: pkg,
Version: version,
Channels: channels,
@@ -762,15 +760,15 @@ func genPackage(pkg string) *declcfg.DeclarativeConfig {
func TestInvalidClusterExtensionCatalogMatchExpressions(t *testing.T) {
r := CatalogResolver{}
- ce := &ocv1alpha1.ClusterExtension{
+ ce := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
- Catalog: &ocv1alpha1.CatalogSource{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
+ Catalog: &ocv1.CatalogSource{
PackageName: "foo",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "name",
@@ -794,15 +792,15 @@ func TestInvalidClusterExtensionCatalogMatchLabelsName(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := &ocv1alpha1.ClusterExtension{
+ ce := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
- Catalog: &ocv1alpha1.CatalogSource{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
+ Catalog: &ocv1.CatalogSource{
PackageName: "foo",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"": "value"},
},
},
@@ -820,15 +818,15 @@ func TestInvalidClusterExtensionCatalogMatchLabelsValue(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := &ocv1alpha1.ClusterExtension{
+ ce := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
- Catalog: &ocv1alpha1.CatalogSource{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
+ Catalog: &ocv1.CatalogSource{
PackageName: "foo",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "&value"},
},
},
@@ -851,8 +849,10 @@ func TestClusterExtensionMatchLabel(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
- ce.Spec.Source.Catalog.Selector.MatchLabels = map[string]string{"olm.operatorframework.io/metadata.name": "b"}
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
+ ce.Spec.Source.Catalog.Selector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": "b"},
+ }
_, _, _, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
@@ -870,8 +870,10 @@ func TestClusterExtensionNoMatchLabel(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
- ce.Spec.Source.Catalog.Selector.MatchLabels = map[string]string{"olm.operatorframework.io/metadata.name": "a"}
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
+ ce.Spec.Source.Catalog.Selector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": "a"},
+ }
_, _, _, err := r.Resolve(context.Background(), ce, nil)
require.Error(t, err)
@@ -912,7 +914,7 @@ func TestUnequalPriority(t *testing.T) {
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, gotVersion, _, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
require.Equal(t, bsemver.MustParse("1.0.0"), *gotVersion)
@@ -933,7 +935,7 @@ func TestMultiplePriority(t *testing.T) {
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.1", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0 <=1.0.1", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.Error(t, err)
require.ErrorContains(t, err, "in multiple catalogs with the same priority [a b c]")
@@ -956,7 +958,7 @@ func TestMultipleChannels(t *testing.T) {
},
}
r := CatalogResolver{WalkCatalogsFunc: w.WalkCatalogs}
- ce := buildFooClusterExtension(pkgName, []string{"beta", "alpha"}, "", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{"beta", "alpha"}, "", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, gotDeprecation, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
assert.Equal(t, genBundle(pkgName, "2.0.0"), *gotBundle)
@@ -970,12 +972,12 @@ func TestAllCatalogsDisabled(t *testing.T) {
return []catalogd.ClusterCatalog{
{
Spec: catalogd.ClusterCatalogSpec{
- Availability: "Disabled",
+ AvailabilityMode: catalogd.AvailabilityModeUnavailable,
},
},
{
Spec: catalogd.ClusterCatalogSpec{
- Availability: "Disabled",
+ AvailabilityMode: catalogd.AvailabilityModeUnavailable,
},
},
}, nil
@@ -989,7 +991,7 @@ func TestAllCatalogsDisabled(t *testing.T) {
WalkCatalogsFunc: CatalogWalker(listCatalogs, getPackage),
}
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
_, _, _, err := r.Resolve(context.Background(), ce, nil)
require.Error(t, err)
assert.Contains(t, err.Error(), "no bundles found for package")
@@ -1004,8 +1006,8 @@ func TestSomeCatalogsDisabled(t *testing.T) {
Name: "enabledCatalog",
},
Spec: catalogd.ClusterCatalogSpec{
- Priority: 1, // Higher priority
- Availability: "Enabled",
+ Priority: 1, // Higher priority
+ AvailabilityMode: catalogd.AvailabilityModeAvailable,
},
},
{
@@ -1013,8 +1015,8 @@ func TestSomeCatalogsDisabled(t *testing.T) {
Name: "disabledCatalog",
},
Spec: catalogd.ClusterCatalogSpec{
- Priority: 0, // Lower priority (but disabled)
- Availability: "Disabled",
+ Priority: 0, // Lower priority (but disabled)
+ AvailabilityMode: catalogd.AvailabilityModeUnavailable,
},
},
}, nil
@@ -1029,7 +1031,7 @@ func TestSomeCatalogsDisabled(t *testing.T) {
WalkCatalogsFunc: CatalogWalker(listCatalogs, getPackage),
}
- ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0", ocv1alpha1.UpgradeConstraintPolicyCatalogProvided)
+ ce := buildFooClusterExtension(pkgName, []string{}, ">=1.0.0", ocv1.UpgradeConstraintPolicyCatalogProvided)
gotBundle, gotVersion, _, err := r.Resolve(context.Background(), ce, nil)
require.NoError(t, err)
require.NotNil(t, gotBundle)
diff --git a/internal/resolve/resolver.go b/internal/resolve/resolver.go
index de9b952b0..625111d63 100644
--- a/internal/resolve/resolver.go
+++ b/internal/resolve/resolver.go
@@ -7,15 +7,15 @@ import (
"github.com/operator-framework/operator-registry/alpha/declcfg"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
type Resolver interface {
- Resolve(ctx context.Context, ext *ocv1alpha1.ClusterExtension, installedBundle *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error)
+ Resolve(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error)
}
-type Func func(ctx context.Context, ext *ocv1alpha1.ClusterExtension, installedBundle *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error)
+type Func func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error)
-func (f Func) Resolve(ctx context.Context, ext *ocv1alpha1.ClusterExtension, installedBundle *ocv1alpha1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
+func (f Func) Resolve(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) {
return f(ctx, ext, installedBundle)
}
diff --git a/internal/rukpak/convert/registryv1.go b/internal/rukpak/convert/registryv1.go
index 0acdd0d6d..e2eff3bc3 100644
--- a/internal/rukpak/convert/registryv1.go
+++ b/internal/rukpak/convert/registryv1.go
@@ -195,6 +195,7 @@ func Convert(in RegistryV1, installNamespace string, targetNamespaces []string)
for _, depSpec := range in.CSV.Spec.InstallStrategy.StrategySpec.DeploymentSpecs {
annotations := util.MergeMaps(in.CSV.Annotations, depSpec.Spec.Template.Annotations)
annotations["olm.targetNamespaces"] = strings.Join(targetNamespaces, ",")
+ depSpec.Spec.Template.Annotations = annotations
deployments = append(deployments, appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
@@ -202,10 +203,9 @@ func Convert(in RegistryV1, installNamespace string, targetNamespaces []string)
},
ObjectMeta: metav1.ObjectMeta{
- Namespace: installNamespace,
- Name: depSpec.Name,
- Labels: depSpec.Label,
- Annotations: annotations,
+ Namespace: installNamespace,
+ Name: depSpec.Name,
+ Labels: depSpec.Label,
},
Spec: depSpec.Spec,
})
diff --git a/internal/rukpak/convert/registryv1_test.go b/internal/rukpak/convert/registryv1_test.go
index 991d5dbdd..8e9171dec 100644
--- a/internal/rukpak/convert/registryv1_test.go
+++ b/internal/rukpak/convert/registryv1_test.go
@@ -2,11 +2,13 @@ package convert
import (
"fmt"
+ "strings"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
@@ -73,7 +75,7 @@ var _ = Describe("RegistryV1 Suite", func() {
Expect(plainBundle.Objects).To(HaveLen(1))
By("verifying if ns has been set correctly")
- resObj := containsObject(unstructuredSvc, plainBundle.Objects)
+ resObj := findObjectByName(svc.Name, plainBundle.Objects)
Expect(resObj).NotTo(BeNil())
Expect(resObj.GetNamespace()).To(BeEquivalentTo(installNamespace))
})
@@ -99,7 +101,7 @@ var _ = Describe("RegistryV1 Suite", func() {
Expect(plainBundle.Objects).To(HaveLen(1))
By("verifying if ns has been set correctly")
- resObj := containsObject(unstructuredSvc, plainBundle.Objects)
+ resObj := findObjectByName(svc.Name, plainBundle.Objects)
Expect(resObj).NotTo(BeNil())
Expect(resObj.GetNamespace()).To(BeEquivalentTo(installNamespace))
})
@@ -157,7 +159,7 @@ var _ = Describe("RegistryV1 Suite", func() {
Expect(plainBundle.Objects).To(HaveLen(1))
By("verifying if ns has been set correctly")
- resObj := containsObject(unstructuredpriorityclass, plainBundle.Objects)
+ resObj := findObjectByName(pc.Name, plainBundle.Objects)
Expect(resObj).NotTo(BeNil())
Expect(resObj.GetNamespace()).To(BeEmpty())
})
@@ -167,12 +169,13 @@ var _ = Describe("RegistryV1 Suite", func() {
Context("Should generate objects successfully based on target namespaces", func() {
var (
svc corev1.Service
- csv v1alpha1.ClusterServiceVersion
+ baseCSV v1alpha1.ClusterServiceVersion
watchNamespaces []string
)
BeforeEach(func() {
- csv = v1alpha1.ClusterServiceVersion{
+ // base CSV definition that each test case will deep copy and modify
+ baseCSV = v1alpha1.ClusterServiceVersion{
ObjectMeta: metav1.ObjectMeta{
Name: "testCSV",
Annotations: map[string]string{
@@ -180,9 +183,25 @@ var _ = Describe("RegistryV1 Suite", func() {
},
},
Spec: v1alpha1.ClusterServiceVersionSpec{
- InstallModes: []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeMultiNamespace, Supported: true}},
InstallStrategy: v1alpha1.NamedInstallStrategy{
StrategySpec: v1alpha1.StrategyDetailsDeployment{
+ DeploymentSpecs: []v1alpha1.StrategyDeploymentSpec{
+ {
+ Name: "testDeployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "testContainer",
+ Image: "testImage",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
Permissions: []v1alpha1.StrategyDeploymentPermissions{
{
ServiceAccountName: "testServiceAccount",
@@ -199,6 +218,7 @@ var _ = Describe("RegistryV1 Suite", func() {
},
},
}
+
svc = corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testService",
@@ -208,13 +228,16 @@ var _ = Describe("RegistryV1 Suite", func() {
installNamespace = "testInstallNamespace"
})
- It("should convert into plain manifests successfully", func() {
+ It("should convert into plain manifests successfully with AllNamespaces", func() {
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: true}}
+
By("creating a registry v1 bundle")
- watchNamespaces = []string{"testWatchNs1", "testWatchNs2"}
+ watchNamespaces = []string{""}
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -224,41 +247,51 @@ var _ = Describe("RegistryV1 Suite", func() {
By("verifying if plain bundle has required objects")
Expect(plainBundle).ShouldNot(BeNil())
- Expect(plainBundle.Objects).To(HaveLen(6))
+ Expect(plainBundle.Objects).To(HaveLen(5))
+
+ By("verifying olm.targetNamespaces annotation in the deployment's pod template")
+ dep := findObjectByName("testDeployment", plainBundle.Objects)
+ Expect(dep).NotTo(BeNil())
+ Expect(dep.(*appsv1.Deployment).Spec.Template.Annotations).To(HaveKeyWithValue("olm.targetNamespaces", strings.Join(watchNamespaces, ",")))
})
- It("should convert into plain manifests successfully with single namespace", func() {
- csv = v1alpha1.ClusterServiceVersion{
- ObjectMeta: metav1.ObjectMeta{
- Name: "testCSV",
- },
- Spec: v1alpha1.ClusterServiceVersionSpec{
- InstallModes: []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeSingleNamespace, Supported: true}},
- InstallStrategy: v1alpha1.NamedInstallStrategy{
- StrategySpec: v1alpha1.StrategyDetailsDeployment{
- Permissions: []v1alpha1.StrategyDeploymentPermissions{
- {
- ServiceAccountName: "testServiceAccount",
- Rules: []rbacv1.PolicyRule{
- {
- APIGroups: []string{"test"},
- Resources: []string{"pods"},
- Verbs: []string{"*"},
- },
- },
- },
- },
- },
- },
- },
+ It("should convert into plain manifests successfully with MultiNamespace", func() {
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeMultiNamespace, Supported: true}}
+
+ By("creating a registry v1 bundle")
+ watchNamespaces = []string{"testWatchNs1", "testWatchNs2"}
+ unstructuredSvc := convertToUnstructured(svc)
+ registryv1Bundle = RegistryV1{
+ PackageName: "testPkg",
+ CSV: *csv,
+ Others: []unstructured.Unstructured{unstructuredSvc},
}
+ By("converting to plain")
+ plainBundle, err := Convert(registryv1Bundle, installNamespace, watchNamespaces)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("verifying if plain bundle has required objects")
+ Expect(plainBundle).ShouldNot(BeNil())
+ Expect(plainBundle.Objects).To(HaveLen(7))
+
+ By("verifying olm.targetNamespaces annotation in the deployment's pod template")
+ dep := findObjectByName("testDeployment", plainBundle.Objects)
+ Expect(dep).NotTo(BeNil())
+ Expect(dep.(*appsv1.Deployment).Spec.Template.Annotations).To(HaveKeyWithValue("olm.targetNamespaces", strings.Join(watchNamespaces, ",")))
+ })
+
+ It("should convert into plain manifests successfully with SingleNamespace", func() {
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeSingleNamespace, Supported: true}}
+
By("creating a registry v1 bundle")
watchNamespaces = []string{"testWatchNs1"}
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -268,41 +301,24 @@ var _ = Describe("RegistryV1 Suite", func() {
By("verifying if plain bundle has required objects")
Expect(plainBundle).ShouldNot(BeNil())
- Expect(plainBundle.Objects).To(HaveLen(4))
+ Expect(plainBundle.Objects).To(HaveLen(5))
+
+ By("verifying olm.targetNamespaces annotation in the deployment's pod template")
+ dep := findObjectByName("testDeployment", plainBundle.Objects)
+ Expect(dep).NotTo(BeNil())
+ Expect(dep.(*appsv1.Deployment).Spec.Template.Annotations).To(HaveKeyWithValue("olm.targetNamespaces", strings.Join(watchNamespaces, ",")))
})
It("should convert into plain manifests successfully with own namespace", func() {
- csv = v1alpha1.ClusterServiceVersion{
- ObjectMeta: metav1.ObjectMeta{
- Name: "testCSV",
- },
- Spec: v1alpha1.ClusterServiceVersionSpec{
- InstallModes: []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true}},
- InstallStrategy: v1alpha1.NamedInstallStrategy{
- StrategySpec: v1alpha1.StrategyDetailsDeployment{
- Permissions: []v1alpha1.StrategyDeploymentPermissions{
- {
- ServiceAccountName: "testServiceAccount",
- Rules: []rbacv1.PolicyRule{
- {
- APIGroups: []string{"test"},
- Resources: []string{"pods"},
- Verbs: []string{"*"},
- },
- },
- },
- },
- },
- },
- },
- }
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true}}
By("creating a registry v1 bundle")
watchNamespaces = []string{installNamespace}
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -312,16 +328,24 @@ var _ = Describe("RegistryV1 Suite", func() {
By("verifying if plain bundle has required objects")
Expect(plainBundle).ShouldNot(BeNil())
- Expect(plainBundle.Objects).To(HaveLen(4))
+ Expect(plainBundle.Objects).To(HaveLen(5))
+
+ By("verifying olm.targetNamespaces annotation in the deployment's pod template")
+ dep := findObjectByName("testDeployment", plainBundle.Objects)
+ Expect(dep).NotTo(BeNil())
+ Expect(dep.(*appsv1.Deployment).Spec.Template.Annotations).To(HaveKeyWithValue("olm.targetNamespaces", strings.Join(watchNamespaces, ",")))
})
It("should error when multinamespace mode is supported with an empty string in target namespaces", func() {
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeMultiNamespace, Supported: true}}
+
By("creating a registry v1 bundle")
watchNamespaces = []string{"testWatchNs1", ""}
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -332,21 +356,15 @@ var _ = Describe("RegistryV1 Suite", func() {
})
It("should error when single namespace mode is disabled with more than one target namespaces", func() {
- csv = v1alpha1.ClusterServiceVersion{
- ObjectMeta: metav1.ObjectMeta{
- Name: "testCSV",
- },
- Spec: v1alpha1.ClusterServiceVersionSpec{
- InstallModes: []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeSingleNamespace, Supported: false}},
- },
- }
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeSingleNamespace, Supported: false}}
By("creating a registry v1 bundle")
watchNamespaces = []string{"testWatchNs1", "testWatchNs2"}
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -357,18 +375,12 @@ var _ = Describe("RegistryV1 Suite", func() {
})
It("should error when all namespace mode is disabled with target namespace containing an empty string", func() {
- csv = v1alpha1.ClusterServiceVersion{
- ObjectMeta: metav1.ObjectMeta{
- Name: "testCSV",
- },
- Spec: v1alpha1.ClusterServiceVersionSpec{
- InstallModes: []v1alpha1.InstallMode{
- {Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: false},
- {Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true},
- {Type: v1alpha1.InstallModeTypeSingleNamespace, Supported: true},
- {Type: v1alpha1.InstallModeTypeMultiNamespace, Supported: true},
- },
- },
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{
+ {Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: false},
+ {Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true},
+ {Type: v1alpha1.InstallModeTypeSingleNamespace, Supported: true},
+ {Type: v1alpha1.InstallModeTypeMultiNamespace, Supported: true},
}
By("creating a registry v1 bundle")
@@ -376,7 +388,7 @@ var _ = Describe("RegistryV1 Suite", func() {
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -387,12 +399,15 @@ var _ = Describe("RegistryV1 Suite", func() {
})
It("should propagate csv annotations to chart metadata annotation", func() {
+ csv := baseCSV.DeepCopy()
+ csv.Spec.InstallModes = []v1alpha1.InstallMode{{Type: v1alpha1.InstallModeTypeMultiNamespace, Supported: true}}
+
By("creating a registry v1 bundle")
watchNamespaces = []string{"testWatchNs1", "testWatchNs2"}
unstructuredSvc := convertToUnstructured(svc)
registryv1Bundle = RegistryV1{
PackageName: "testPkg",
- CSV: csv,
+ CSV: *csv,
Others: []unstructured.Unstructured{unstructuredSvc},
}
@@ -462,11 +477,11 @@ func convertToUnstructured(obj interface{}) unstructured.Unstructured {
return unstructured.Unstructured{Object: unstructuredObj}
}
-func containsObject(obj unstructured.Unstructured, result []client.Object) client.Object {
+func findObjectByName(name string, result []client.Object) client.Object {
for _, o := range result {
// Since this is a controlled env, comparing only the names is sufficient for now.
// In future, compare GVKs too by ensuring its set on the unstructuredObj.
- if o.GetName() == obj.GetName() {
+ if o.GetName() == name {
return o
}
}
diff --git a/internal/rukpak/preflights/crdupgradesafety/checks.go b/internal/rukpak/preflights/crdupgradesafety/checks.go
index cc7be4a66..b795b11de 100644
--- a/internal/rukpak/preflights/crdupgradesafety/checks.go
+++ b/internal/rukpak/preflights/crdupgradesafety/checks.go
@@ -1,12 +1,16 @@
package crdupgradesafety
import (
+ "bytes"
+ "cmp"
"errors"
"fmt"
+ "reflect"
"slices"
kappcus "carvel.dev/kapp/pkg/kapp/crdupgradesafety"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
versionhelper "k8s.io/apimachinery/pkg/version"
)
@@ -70,3 +74,236 @@ func (c *ServedVersionValidator) Validate(old, new apiextensionsv1.CustomResourc
func (c *ServedVersionValidator) Name() string {
return "ServedVersionValidator"
}
+
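+// resetFunc clears the field(s) a check is responsible for so that isHandled
+// can detect whether anything else in the schema also changed.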
+type resetFunc func(diff kappcus.FieldDiff) kappcus.FieldDiff
+
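+// isHandled reports whether a check fully accounts for the given diff: after
+// resetting the field(s) the check owns, the old and new schemas must be equal.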
+func isHandled(diff kappcus.FieldDiff, reset resetFunc) bool {
+ diff = reset(diff)
+ return reflect.DeepEqual(diff.Old, diff.New)
+}
+
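+// Enum flags enum constraints that are newly introduced or that drop previously
+// allowed values, both of which are backwards incompatible.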
+func Enum(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.Enum = []apiextensionsv1.JSON{}
+ diff.New.Enum = []apiextensionsv1.JSON{}
+ return diff
+ }
+
+ oldEnums := sets.New[string]()
+ for _, json := range diff.Old.Enum {
+ oldEnums.Insert(string(json.Raw))
+ }
+
+ newEnums := sets.New[string]()
+ for _, json := range diff.New.Enum {
+ newEnums.Insert(string(json.Raw))
+ }
+ diffEnums := oldEnums.Difference(newEnums)
+ var err error
+
+ switch {
+ case oldEnums.Len() == 0 && newEnums.Len() > 0:
+ err = fmt.Errorf("enum constraints %v added when there were no restrictions previously", newEnums.UnsortedList())
+ case diffEnums.Len() > 0:
+ err = fmt.Errorf("enums %v removed from the set of previously allowed values", diffEnums.UnsortedList())
+ }
+
+ return isHandled(diff, reset), err
+}
+
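+// Required flags newly added required fields.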
+func Required(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.Required = []string{}
+ diff.New.Required = []string{}
+ return diff
+ }
+
+ oldRequired := sets.New(diff.Old.Required...)
+ newRequired := sets.New(diff.New.Required...)
+ diffRequired := newRequired.Difference(oldRequired)
+ var err error
+
+ if diffRequired.Len() > 0 {
+ err = fmt.Errorf("new required fields %v added", diffRequired.UnsortedList())
+ }
+
+ return isHandled(diff, reset), err
+}
+
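+// maxVerification returns an error when a maximum-style constraint is newly
+// added or decreased, i.e. tightened relative to the old schema.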
+func maxVerification[T cmp.Ordered](older *T, newer *T) error {
+ var err error
+ switch {
+ case older == nil && newer != nil:
+ err = fmt.Errorf("constraint %v added when there were no restrictions previously", *newer)
+ case older != nil && newer != nil && *newer < *older:
+ err = fmt.Errorf("constraint decreased from %v to %v", *older, *newer)
+ }
+ return err
+}
+
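+// Maximum flags a new or decreased maximum constraint.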
+func Maximum(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.Maximum = nil
+ diff.New.Maximum = nil
+ return diff
+ }
+
+ err := maxVerification(diff.Old.Maximum, diff.New.Maximum)
+ if err != nil {
+ err = fmt.Errorf("maximum: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
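+// MaxItems flags a new or decreased maxItems constraint.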
+func MaxItems(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.MaxItems = nil
+ diff.New.MaxItems = nil
+ return diff
+ }
+
+ err := maxVerification(diff.Old.MaxItems, diff.New.MaxItems)
+ if err != nil {
+ err = fmt.Errorf("maxItems: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
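+// MaxLength flags a new or decreased maxLength constraint.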
+func MaxLength(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.MaxLength = nil
+ diff.New.MaxLength = nil
+ return diff
+ }
+
+ err := maxVerification(diff.Old.MaxLength, diff.New.MaxLength)
+ if err != nil {
+ err = fmt.Errorf("maxLength: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
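+// MaxProperties flags a new or decreased maxProperties constraint.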
+func MaxProperties(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.MaxProperties = nil
+ diff.New.MaxProperties = nil
+ return diff
+ }
+
+ err := maxVerification(diff.Old.MaxProperties, diff.New.MaxProperties)
+ if err != nil {
+ err = fmt.Errorf("maxProperties: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
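+// minVerification returns an error when a minimum-style constraint is newly
+// added or increased, i.e. tightened relative to the old schema.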
+func minVerification[T cmp.Ordered](older *T, newer *T) error {
+ var err error
+ switch {
+ case older == nil && newer != nil:
+ err = fmt.Errorf("constraint %v added when there were no restrictions previously", *newer)
+ case older != nil && newer != nil && *newer > *older:
+ err = fmt.Errorf("constraint increased from %v to %v", *older, *newer)
+ }
+ return err
+}
+
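+// Minimum flags a new or increased minimum constraint.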
+func Minimum(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.Minimum = nil
+ diff.New.Minimum = nil
+ return diff
+ }
+
+ err := minVerification(diff.Old.Minimum, diff.New.Minimum)
+ if err != nil {
+ err = fmt.Errorf("minimum: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
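+// MinItems flags a new or increased minItems constraint.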
+func MinItems(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.MinItems = nil
+ diff.New.MinItems = nil
+ return diff
+ }
+
+ err := minVerification(diff.Old.MinItems, diff.New.MinItems)
+ if err != nil {
+ err = fmt.Errorf("minItems: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
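+// MinLength flags a new or increased minLength constraint.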
+func MinLength(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.MinLength = nil
+ diff.New.MinLength = nil
+ return diff
+ }
+
+ err := minVerification(diff.Old.MinLength, diff.New.MinLength)
+ if err != nil {
+ err = fmt.Errorf("minLength: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
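+// MinProperties flags a new or increased minProperties constraint.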
+func MinProperties(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.MinProperties = nil
+ diff.New.MinProperties = nil
+ return diff
+ }
+
+ err := minVerification(diff.Old.MinProperties, diff.New.MinProperties)
+ if err != nil {
+ err = fmt.Errorf("minProperties: %s", err.Error())
+ }
+
+ return isHandled(diff, reset), err
+}
+
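+// Default flags any added, removed, or changed default value.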
+func Default(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.Default = nil
+ diff.New.Default = nil
+ return diff
+ }
+
+ var err error
+
+ switch {
+ case diff.Old.Default == nil && diff.New.Default != nil:
+ err = fmt.Errorf("default value %q added when there was no default previously", string(diff.New.Default.Raw))
+ case diff.Old.Default != nil && diff.New.Default == nil:
+ err = fmt.Errorf("default value %q removed", string(diff.Old.Default.Raw))
+ case diff.Old.Default != nil && diff.New.Default != nil && !bytes.Equal(diff.Old.Default.Raw, diff.New.Default.Raw):
+ err = fmt.Errorf("default value changed from %q to %q", string(diff.Old.Default.Raw), string(diff.New.Default.Raw))
+ }
+
+ return isHandled(diff, reset), err
+}
+
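+// Type flags a change to a field's type.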
+func Type(diff kappcus.FieldDiff) (bool, error) {
+ reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff {
+ diff.Old.Type = ""
+ diff.New.Type = ""
+ return diff
+ }
+
+ var err error
+ if diff.Old.Type != diff.New.Type {
+ err = fmt.Errorf("type changed from %q to %q", diff.Old.Type, diff.New.Type)
+ }
+
+ return isHandled(diff, reset), err
+}
diff --git a/internal/rukpak/preflights/crdupgradesafety/checks_test.go b/internal/rukpak/preflights/crdupgradesafety/checks_test.go
new file mode 100644
index 000000000..6544006ce
--- /dev/null
+++ b/internal/rukpak/preflights/crdupgradesafety/checks_test.go
@@ -0,0 +1,907 @@
+package crdupgradesafety
+
+import (
+ "errors"
+ "testing"
+
+ kappcus "carvel.dev/kapp/pkg/kapp/crdupgradesafety"
+ "github.com/stretchr/testify/require"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/utils/ptr"
+)
+
+type testcase struct {
+ name string
+ diff kappcus.FieldDiff
+ err error
+ handled bool
+}
+
+func TestEnum(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new enum constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Enum: []apiextensionsv1.JSON{},
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ },
+ err: errors.New("enum constraints [foo] added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "remove enum value, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("foo"),
+ },
+ {
+ Raw: []byte("bar"),
+ },
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("bar"),
+ },
+ },
+ },
+ },
+ err: errors.New("enums [foo] removed from the set of previously allowed values"),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ {
+ name: "different field changed with enum, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ Enum: []apiextensionsv1.JSON{
+ {
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := Enum(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestRequired(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Required: []string{
+ "foo",
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Required: []string{
+ "foo",
+ },
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new required field, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ Required: []string{
+ "foo",
+ },
+ },
+ },
+ err: errors.New("new required fields [foo] added"),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := Required(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMaximum(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(10.0),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(10.0),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new maximum constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(10.0),
+ },
+ },
+ err: errors.New("maximum: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "maximum constraint decreased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(20.0),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(10.0),
+ },
+ },
+ err: errors.New("maximum: constraint decreased from 20 to 10"),
+ handled: true,
+ },
+ {
+ name: "maximum constraint increased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(20.0),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Maximum: ptr.To(30.0),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := Maximum(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMaxItems(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new maxItems constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("maxItems: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "maxItems constraint decreased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(20)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("maxItems: constraint decreased from 20 to 10"),
+ handled: true,
+ },
+ {
+ name: "maxitems constraint increased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxItems: ptr.To(int64(20)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := MaxItems(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMaxLength(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new maxLength constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("maxLength: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "maxLength constraint decreased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(20)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("maxLength: constraint decreased from 20 to 10"),
+ handled: true,
+ },
+ {
+ name: "maxLength constraint increased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxLength: ptr.To(int64(20)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := MaxLength(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMaxProperties(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new maxProperties constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("maxProperties: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "maxProperties constraint decreased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(20)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("maxProperties: constraint decreased from 20 to 10"),
+ handled: true,
+ },
+ {
+ name: "maxProperties constraint increased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MaxProperties: ptr.To(int64(20)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := MaxProperties(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMinItems(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new minItems constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("minItems: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "minItems constraint decreased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(20)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "minItems constraint increased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinItems: ptr.To(int64(20)),
+ },
+ },
+ err: errors.New("minItems: constraint increased from 10 to 20"),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := MinItems(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMinimum(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(10.0),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(10.0),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new minimum constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(10.0),
+ },
+ },
+ err: errors.New("minimum: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "minLength constraint decreased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(20.0),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(10.0),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "minLength constraint increased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(10.0),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Minimum: ptr.To(20.0),
+ },
+ },
+ err: errors.New("minimum: constraint increased from 10 to 20"),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := Minimum(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMinLength(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new minLength constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("minLength: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "minLength constraint decreased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(20)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "minLength constraint increased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinLength: ptr.To(int64(20)),
+ },
+ },
+ err: errors.New("minLength: constraint increased from 10 to 20"),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := MinLength(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestMinProperties(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new minProperties constraint, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(10)),
+ },
+ },
+ err: errors.New("minProperties: constraint 10 added when there were no restrictions previously"),
+ handled: true,
+ },
+ {
+ name: "minProperties constraint decreased, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(20)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(10)),
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "minProperties constraint increased, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(10)),
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ MinProperties: ptr.To(int64(20)),
+ },
+ },
+ err: errors.New("minProperties: constraint increased from 10 to 20"),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := MinProperties(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestDefault(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Default: &apiextensionsv1.JSON{
+ Raw: []byte("foo"),
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Default: &apiextensionsv1.JSON{
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "new default value, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{},
+ New: &apiextensionsv1.JSONSchemaProps{
+ Default: &apiextensionsv1.JSON{
+ Raw: []byte("foo"),
+ },
+ },
+ },
+ err: errors.New("default value \"foo\" added when there was no default previously"),
+ handled: true,
+ },
+ {
+ name: "default value removed, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Default: &apiextensionsv1.JSON{
+ Raw: []byte("foo"),
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{},
+ },
+ err: errors.New("default value \"foo\" removed"),
+ handled: true,
+ },
+ {
+ name: "default value changed, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Default: &apiextensionsv1.JSON{
+ Raw: []byte("foo"),
+ },
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Default: &apiextensionsv1.JSON{
+ Raw: []byte("bar"),
+ },
+ },
+ },
+ err: errors.New("default value changed from \"foo\" to \"bar\""),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := Default(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
+
+func TestType(t *testing.T) {
+ for _, tc := range []testcase{
+ {
+ name: "no diff, no error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Type: "string",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Type: "string",
+ },
+ },
+ err: nil,
+ handled: true,
+ },
+ {
+ name: "type changed, error, handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ Type: "string",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ Type: "integer",
+ },
+ },
+ err: errors.New("type changed from \"string\" to \"integer\""),
+ handled: true,
+ },
+ {
+ name: "different field changed, no error, not handled",
+ diff: kappcus.FieldDiff{
+ Old: &apiextensionsv1.JSONSchemaProps{
+ ID: "foo",
+ },
+ New: &apiextensionsv1.JSONSchemaProps{
+ ID: "bar",
+ },
+ },
+ err: nil,
+ handled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ handled, err := Type(tc.diff)
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.handled, handled)
+ })
+ }
+}
diff --git a/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety.go b/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety.go
index 3f91c8c2b..7cdd905f6 100644
--- a/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety.go
+++ b/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety.go
@@ -32,17 +32,18 @@ type Preflight struct {
func NewPreflight(crdCli apiextensionsv1client.CustomResourceDefinitionInterface, opts ...Option) *Preflight {
changeValidations := []kappcus.ChangeValidation{
- kappcus.EnumChangeValidation,
- kappcus.RequiredFieldChangeValidation,
- kappcus.MaximumChangeValidation,
- kappcus.MaximumItemsChangeValidation,
- kappcus.MaximumLengthChangeValidation,
- kappcus.MaximumPropertiesChangeValidation,
- kappcus.MinimumChangeValidation,
- kappcus.MinimumItemsChangeValidation,
- kappcus.MinimumLengthChangeValidation,
- kappcus.MinimumPropertiesChangeValidation,
- kappcus.DefaultValueChangeValidation,
+ Enum,
+ Required,
+ Maximum,
+ MaxItems,
+ MaxLength,
+ MaxProperties,
+ Minimum,
+ MinItems,
+ MinLength,
+ MinProperties,
+ Default,
+ Type,
}
p := &Preflight{
crdClient: crdCli,
diff --git a/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go b/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go
index 39e0a0fe9..98b2289bd 100644
--- a/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go
+++ b/internal/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go
@@ -166,17 +166,17 @@ func TestInstall(t *testing.T) {
wantErrMsgs: []string{
`"NoScopeChange"`,
`"NoStoredVersionRemoved"`,
- `enums added`,
- `new required fields added`,
- `maximum constraint added when one did not exist previously`,
- `maximum items constraint added`,
- `maximum length constraint added`,
- `maximum properties constraint added`,
- `minimum constraint added when one did not exist previously`,
- `minimum items constraint added`,
- `minimum length constraint added`,
- `minimum properties constraint added`,
- `new value added as default`,
+ `enum constraints`,
+ `new required fields`,
+ `maximum: constraint`,
+ `maxItems: constraint`,
+ `maxLength: constraint`,
+ `maxProperties: constraint`,
+ `minimum: constraint`,
+ `minItems: constraint`,
+ `minLength: constraint`,
+ `minProperties: constraint`,
+ `default value`,
},
},
{
@@ -303,17 +303,17 @@ func TestUpgrade(t *testing.T) {
wantErrMsgs: []string{
`"NoScopeChange"`,
`"NoStoredVersionRemoved"`,
- `enums added`,
- `new required fields added`,
- `maximum constraint added when one did not exist previously`,
- `maximum items constraint added`,
- `maximum length constraint added`,
- `maximum properties constraint added`,
- `minimum constraint added when one did not exist previously`,
- `minimum items constraint added`,
- `minimum length constraint added`,
- `minimum properties constraint added`,
- `new value added as default`,
+ `enum constraints`,
+ `new required fields`,
+ `maximum: constraint`,
+ `maxItems: constraint`,
+ `maxLength: constraint`,
+ `maxProperties: constraint`,
+ `minimum: constraint`,
+ `minItems: constraint`,
+ `minLength: constraint`,
+ `minProperties: constraint`,
+ `default value`,
},
},
{
@@ -345,7 +345,7 @@ func TestUpgrade(t *testing.T) {
Manifest: getManifestString(t, "crd-conversion-no-webhook.json"),
},
wantErrMsgs: []string{
- `"ServedVersionValidator" validation failed: version upgrade "v1" to "v2", field "^.spec.foobarbaz": enum values removed`,
+ `"ServedVersionValidator" validation failed: version upgrade "v1" to "v2", field "^.spec.foobarbaz": enums`,
},
},
}
diff --git a/internal/scheme/scheme.go b/internal/scheme/scheme.go
index 933d89b05..a5fae6298 100644
--- a/internal/scheme/scheme.go
+++ b/internal/scheme/scheme.go
@@ -7,16 +7,16 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
var Scheme = runtime.NewScheme()
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(Scheme))
- utilruntime.Must(ocv1alpha1.AddToScheme(Scheme))
+ utilruntime.Must(ocv1.AddToScheme(Scheme))
utilruntime.Must(catalogd.AddToScheme(Scheme))
utilruntime.Must(appsv1.AddToScheme(Scheme))
utilruntime.Must(corev1.AddToScheme(Scheme))
diff --git a/mkdocs.yml b/mkdocs.yml
index 7df6b7eba..f7b20ae07 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -41,6 +41,7 @@ nav:
- Version Range Upgrades: howto/how-to-version-range-upgrades.md
- Z-Stream Upgrades: howto/how-to-z-stream-upgrades.md
- Derive Service Account Permissions: howto/derive-service-account.md
+ - Grant Access to Your Extension's API: howto/how-to-grant-api-access.md
- Conceptual Guides:
- Single Owner Objects: concepts/single-owner-objects.md
- Upgrade Support: concepts/upgrade-support.md
diff --git a/openshift/manifests/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml b/openshift/manifests/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml
index 6e4fe44ff..afc4a5ca9 100644
--- a/openshift/manifests/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml
+++ b/openshift/manifests/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml
@@ -30,7 +30,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- name: v1alpha1
+ name: v1
schema:
openAPIV3Schema:
description: ClusterExtension is the Schema for the clusterextensions API
@@ -53,60 +53,20 @@ spec:
metadata:
type: object
spec:
- description: ClusterExtensionSpec defines the desired state of ClusterExtension
+ description: spec is an optional field that defines the desired state of the ClusterExtension.
properties:
install:
description: |-
- install is a required field used to configure the installation options
- for the ClusterExtension such as the installation namespace,
- the service account and the pre-flight check configuration.
-
- Below is a minimal example of an installation definition (in yaml):
- install:
- namespace: example-namespace
- serviceAccount:
- name: example-sa
+ install is an optional field used to configure the installation options
+ for the ClusterExtension such as the pre-flight check configuration.
properties:
- namespace:
- description: |-
- namespace is a reference to the Namespace in which the bundle of
- content for the package referenced in the packageName field will be applied.
- The bundle may contain cluster-scoped resources or resources that are
- applied to other Namespaces. This Namespace is expected to exist.
-
- namespace is required, immutable, and follows the DNS label standard
- as defined in [RFC 1123]. This means that valid values:
- - Contain no more than 63 characters
- - Contain only lowercase alphanumeric characters or '-'
- - Start with an alphanumeric character
- - End with an alphanumeric character
-
- Some examples of valid values are:
- - some-namespace
- - 123-namespace
- - 1-namespace-2
- - somenamespace
-
- Some examples of invalid values are:
- - -some-namespace
- - some-namespace-
- - thisisareallylongnamespacenamethatisgreaterthanthemaximumlength
- - some.namespace
-
- [RFC 1123]: https://tools.ietf.org/html/rfc1123
- maxLength: 63
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
- type: string
- x-kubernetes-validations:
- - message: namespace is immutable
- rule: self == oldSelf
preflight:
description: |-
- preflight is an optional field that can be used to configure the preflight checks run before installation or upgrade of the content for the package specified in the packageName field.
-
- When specified, it overrides the default configuration of the preflight checks that are required to execute successfully during an install/upgrade operation.
+ preflight is an optional field that can be used to configure the checks that are
+ run before installation or upgrade of the content for the package specified in the packageName field.
- When not specified, the default configuration for each preflight check will be used.
+ When specified, it replaces the default preflight configuration for install/upgrade actions.
+ When not specified, the default configuration will be used.
properties:
crdUpgradeSafety:
description: |-
@@ -115,31 +75,25 @@ spec:
The CRD Upgrade Safety pre-flight check safeguards from unintended
consequences of upgrading a CRD, such as data loss.
-
- This field is required if the spec.install.preflight field is specified.
properties:
- policy:
- default: Enabled
+ enforcement:
description: |-
- policy is used to configure the state of the CRD Upgrade Safety pre-flight check.
-
- This field is required when the spec.install.preflight.crdUpgradeSafety field is
- specified.
+ enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
- Allowed values are ["Enabled", "Disabled"]. The default value is "Enabled".
+ Allowed values are "None" or "Strict". The default value is "Strict".
- When set to "Disabled", the CRD Upgrade Safety pre-flight check will be skipped
+ When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
when performing an upgrade operation. This should be used with caution as
unintended consequences such as data loss can occur.
- When set to "Enabled", the CRD Upgrade Safety pre-flight check will be run when
+ When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
performing an upgrade operation.
enum:
- - Enabled
- - Disabled
+ - None
+ - Strict
type: string
required:
- - policy
+ - enforcement
type: object
required:
- crdUpgradeSafety
@@ -147,56 +101,73 @@ spec:
x-kubernetes-validations:
- message: at least one of [crdUpgradeSafety] are required when preflight is specified
rule: has(self.crdUpgradeSafety)
- serviceAccount:
+ type: object
+ x-kubernetes-validations:
+ - message: at least one of [preflight] are required when install is specified
+ rule: has(self.preflight)
+ namespace:
+ description: |-
+ namespace is a reference to a Kubernetes namespace.
+ This is the namespace in which the provided ServiceAccount must exist.
+ It also designates the default namespace where namespace-scoped resources
+ for the extension are applied to the cluster.
+ Some extensions may contain namespace-scoped resources to be applied in other namespaces.
+ This namespace must exist.
+
+ namespace is required, immutable, and follows the DNS label standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
+ start and end with an alphanumeric character, and be no longer than 63 characters.
+
+ [RFC 1123]: https://tools.ietf.org/html/rfc1123
+ maxLength: 63
+ type: string
+ x-kubernetes-validations:
+ - message: namespace is immutable
+ rule: self == oldSelf
+ - message: namespace must be a valid DNS1123 label
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
+ serviceAccount:
+ description: |-
+ serviceAccount is a reference to a ServiceAccount used to perform all interactions
+ with the cluster that are required to manage the extension.
+ The ServiceAccount must be configured with the necessary permissions to perform these interactions.
+ The ServiceAccount must exist in the namespace referenced in the spec.
+ serviceAccount is required.
+ properties:
+ name:
description: |-
- serviceAccount is a required reference to a ServiceAccount that exists
- in the installNamespace. The provided ServiceAccount is used to install and
- manage the content for the package specified in the packageName field.
-
- In order to successfully install and manage the content for the package,
- the ServiceAccount provided via this field should be configured with the
- appropriate permissions to perform the necessary operations on all the
- resources that are included in the bundle of content being applied.
- properties:
- name:
- description: |-
- name is a required, immutable reference to the name of the ServiceAccount
- to be used for installation and management of the content for the package
- specified in the packageName field.
+ name is a required, immutable reference to the name of the ServiceAccount
+ to be used for installation and management of the content for the package
+ specified in the packageName field.
- This ServiceAccount is expected to exist in the installNamespace.
+ This ServiceAccount must exist in the installNamespace.
- This field follows the DNS subdomain name standard as defined in [RFC
- 1123]. This means that valid values:
- - Contain no more than 253 characters
- - Contain only lowercase alphanumeric characters, '-', or '.'
- - Start with an alphanumeric character
- - End with an alphanumeric character
+ name follows the DNS subdomain standard as defined in [RFC 1123].
+ It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters.
- Some examples of valid values are:
- - some-serviceaccount
- - 123-serviceaccount
- - 1-serviceaccount-2
- - someserviceaccount
- - some.serviceaccount
+ Some examples of valid values are:
+ - some-serviceaccount
+ - 123-serviceaccount
+ - 1-serviceaccount-2
+ - someserviceaccount
+ - some.serviceaccount
- Some examples of invalid values are:
- - -some-serviceaccount
- - some-serviceaccount-
+ Some examples of invalid values are:
+ - -some-serviceaccount
+ - some-serviceaccount-
- [RFC 1123]: https://tools.ietf.org/html/rfc1123
- maxLength: 253
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
- type: string
- x-kubernetes-validations:
- - message: name is immutable
- rule: self == oldSelf
- required:
- - name
- type: object
+ [RFC 1123]: https://tools.ietf.org/html/rfc1123
+ maxLength: 253
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ - message: name must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
required:
- - namespace
- - serviceAccount
+ - name
type: object
source:
description: |-
@@ -215,15 +186,20 @@ spec:
properties:
catalog:
description: |-
- catalog is used to configure how information is sourced from a catalog. This field must be defined when sourceType is set to "Catalog",
- and must be the only field defined for this sourceType.
+ catalog is used to configure how information is sourced from a catalog.
+ This field is required when sourceType is "Catalog", and forbidden otherwise.
properties:
channels:
description: |-
channels is an optional reference to a set of channels belonging to
the package specified in the packageName field.
- A "channel" is a package author defined stream of updates for an extension.
+ A "channel" is a package-author-defined stream of updates for an extension.
+
+ Each channel in the list must follow the DNS subdomain standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters. No more than 256 channels can be specified.
When specified, it is used to constrain the set of installable bundles and
the automated upgrade path. This constraint is an AND operation with the
@@ -235,13 +211,6 @@ spec:
When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
- This field follows the DNS subdomain name standard as defined in [RFC
- 1123]. This means that valid entries:
- - Contain no more than 253 characters
- - Contain only lowercase alphanumeric characters, '-', or '.'
- - Start with an alphanumeric character
- - End with an alphanumeric character
-
Some examples of valid values are:
- 1.1.x
- alpha
@@ -262,20 +231,21 @@ spec:
[RFC 1123]: https://tools.ietf.org/html/rfc1123
items:
maxLength: 253
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
+ x-kubernetes-validations:
+ - message: channels entries must be valid DNS1123 subdomains
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
+ maxItems: 256
type: array
packageName:
description: |-
packageName is a reference to the name of the package to be installed
and is used to filter the content from catalogs.
- This field is required, immutable and follows the DNS subdomain name
- standard as defined in [RFC 1123]. This means that valid entries:
- - Contain no more than 253 characters
- - Contain only lowercase alphanumeric characters, '-', or '.'
- - Start with an alphanumeric character
- - End with an alphanumeric character
+ packageName is required, immutable, and follows the DNS subdomain standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters.
Some examples of valid values are:
- some-package
@@ -291,11 +261,12 @@ spec:
[RFC 1123]: https://tools.ietf.org/html/rfc1123
maxLength: 253
- pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
x-kubernetes-validations:
- message: packageName is immutable
rule: self == oldSelf
+ - message: packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
selector:
description: |-
selector is an optional field that can be used
@@ -353,7 +324,7 @@ spec:
the upgrade path(s) defined in the catalog are enforced for the package
referenced in the packageName field.
- Allowed values are: ["CatalogProvided", "SelfCertified"].
+ Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
When this field is set to "CatalogProvided", automatic upgrades will only occur
when upgrade constraints specified by the package author are met.
@@ -365,7 +336,7 @@ spec:
loss. It is assumed that users have independently verified changes when
using this option.
- If unspecified, the default value is "CatalogProvided".
+ When this field is omitted, the default value is "CatalogProvided".
enum:
- CatalogProvided
- SelfCertified
@@ -446,8 +417,10 @@ spec:
For more information on semver, please see https://semver.org/
maxLength: 64
- pattern: ^(\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\^)\s*(v?(0|[1-9]\d*|[x|X|\*])(\.(0|[1-9]\d*|x|X|\*]))?(\.(0|[1-9]\d*|x|X|\*))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)((?:\s+|,\s*|\s*\|\|\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\^)\s*(v?(0|[1-9]\d*|x|X|\*])(\.(0|[1-9]\d*|x|X|\*))?(\.(0|[1-9]\d*|x|X|\*]))?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?)\s*)*$
type: string
+ x-kubernetes-validations:
+ - message: invalid version expression
+ rule: self.matches("^(\\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\\^)\\s*(v?(0|[1-9]\\d*|[x|X|\\*])(\\.(0|[1-9]\\d*|x|X|\\*]))?(\\.(0|[1-9]\\d*|x|X|\\*))?(-([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?(\\+([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?)\\s*)((?:\\s+|,\\s*|\\s*\\|\\|\\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\\^)\\s*(v?(0|[1-9]\\d*|x|X|\\*])(\\.(0|[1-9]\\d*|x|X|\\*))?(\\.(0|[1-9]\\d*|x|X|\\*]))?(-([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?(\\+([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?)\\s*)*$")
required:
- packageName
type: object
@@ -455,11 +428,12 @@ spec:
description: |-
sourceType is a required reference to the type of install source.
- Allowed values are ["Catalog"]
+ Allowed values are "Catalog"
- When this field is set to "Catalog", information for determining the appropriate
- bundle of content to install will be fetched from ClusterCatalog resources existing
- on the cluster. When using the Catalog sourceType, the catalog field must also be set.
+ When this field is set to "Catalog", information for determining the
+ appropriate bundle of content to install will be fetched from
+ ClusterCatalog resources existing on the cluster.
+ When using the Catalog sourceType, the catalog field must also be set.
enum:
- Catalog
type: string
@@ -467,42 +441,35 @@ spec:
- sourceType
type: object
x-kubernetes-validations:
- - message: sourceType Catalog requires catalog field
- rule: self.sourceType == 'Catalog' && has(self.catalog)
+ - message: catalog is required when sourceType is Catalog, and forbidden otherwise
+ rule: 'has(self.sourceType) && self.sourceType == ''Catalog'' ? has(self.catalog) : !has(self.catalog)'
required:
- - install
+ - namespace
+ - serviceAccount
- source
type: object
status:
- description: ClusterExtensionStatus defines the observed state of ClusterExtension.
+ description: status is an optional field that defines the observed state of the ClusterExtension.
properties:
conditions:
description: |-
- conditions is a representation of the current state for this ClusterExtension.
- The status is represented by a set of "conditions".
-
- Each condition is generally structured in the following format:
- - Type: a string representation of the condition type. More or less the condition "name".
- - Status: a string representation of the state of the condition. Can be one of ["True", "False", "Unknown"].
- - Reason: a string representation of the reason for the current state of the condition. Typically useful for building automation around particular Type+Reason combinations.
- - Message: a human readable message that further elaborates on the state of the condition
-
- The global set of condition types are:
- - "Installed", represents whether or not the a bundle has been installed for this ClusterExtension
- - "Progressing", represents whether or not the ClusterExtension is progressing towards a new state
-
- When the ClusterExtension is sourced from a catalog, the following conditions are also possible:
- - "Deprecated", represents an aggregation of the PackageDeprecated, ChannelDeprecated, and BundleDeprecated condition types
- - "PackageDeprecated", represents whether or not the package specified in the spec.source.catalog.packageName field has been deprecated
- - "ChannelDeprecated", represents whether or not any channel specified in spec.source.catalog.channels has been deprecated
- - "BundleDeprecated", represents whether or not the installed bundle is deprecated
-
- The current set of reasons are:
- - "Succeeded", this reason is set on the "Installed" and "Progressing" conditions when initial installation and progressing to a new state is successful
- - "Failed", this reason is set on the "Installed" condition when an error has occurred while performing the initial installation.
- - "Blocked", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that requires manual intervention for recovery
- - "Retrying", this reason is set on the "Progressing" condition when the ClusterExtension controller has encountered an error that could be resolved on subsequent reconciliation attempts
- - "Deprecated", this reason is set on the "Deprecated", "PackageDeprecated", "ChannelDeprecated", and "BundleDeprecated" conditions to signal that the installed package has been deprecated at the particular scope
+ The set of condition types which apply to all spec.source variations are Installed and Progressing.
+
+ The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
+ When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
+ When Installed is False and the Reason is Failed, the bundle has failed to install.
+
+ The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
+ When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
+ When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
+ When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
+
+ When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
+ These are indications from a package owner to guide users away from a particular package, channel, or bundle.
+ BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
+ ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
+ PackageDeprecated is set if the requested package is marked deprecated in the catalog.
+ Deprecated is a rollup condition that is present when any of the deprecated conditions are present.
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
@@ -561,24 +528,33 @@ spec:
- type
x-kubernetes-list-type: map
install:
+ description: install is a representation of the current installation status for this ClusterExtension.
properties:
bundle:
description: |-
- bundle is a representation of the currently installed bundle.
+ bundle is a required field which represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that
need to be applied to a cluster to install a package.
properties:
name:
description: |-
- name is a required field and is a reference
- to the name of a bundle
+ name is required and follows the DNS subdomain standard
+ as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
+ hyphens (-) or periods (.), start and end with an alphanumeric character,
+ and be no longer than 253 characters.
type: string
+ x-kubernetes-validations:
+ - message: name must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters
+ rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$")
version:
description: |-
- version is a required field and is a reference
- to the version that this bundle represents
+ version is a required field and is a reference to the version that this bundle represents.
+ version follows the semantic versioning standard as defined in https://semver.org/.
type: string
+ x-kubernetes-validations:
+ - message: version must be well-formed semver
+ rule: self.matches("^([0-9]+)(\\.[0-9]+)?(\\.[0-9]+)?(-([-0-9A-Za-z]+(\\.[-0-9A-Za-z]+)*))?(\\+([-0-9A-Za-z]+(-\\.[-0-9A-Za-z]+)*))?")
required:
- name
- version
diff --git a/requirements.txt b/requirements.txt
index 906260fd9..2fd8b6c75 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,9 +14,9 @@ markdown2==2.5.1
MarkupSafe==3.0.2
mergedeep==1.3.4
mkdocs==1.6.1
-mkdocs-material==9.5.43
+mkdocs-material==9.5.44
mkdocs-material-extensions==1.3.1
-packaging==24.1
+packaging==24.2
paginate==0.5.7
pathspec==0.12.1
platformdirs==4.3.6
@@ -27,7 +27,7 @@ python-dateutil==2.9.0.post0
PyYAML==6.0.2
pyyaml_env_tag==0.1
readtime==3.0.0
-regex==2024.9.11
+regex==2024.11.6
requests==2.32.3
six==1.16.0
soupsieve==2.6
diff --git a/scripts/install.tpl.sh b/scripts/install.tpl.sh
index c1907ddc9..c3525dbcb 100644
--- a/scripts/install.tpl.sh
+++ b/scripts/install.tpl.sh
@@ -41,13 +41,40 @@ function kubectl_wait_rollout() {
kubectl rollout status --namespace="${namespace}" "${runtime}" --timeout="${timeout}"
}
+function kubectl_wait_for_query() {
+ manifest=$1
+ query=$2
+ timeout=$3
+ poll_interval_in_seconds=$4
+
+ if [[ -z "$manifest" || -z "$query" || -z "$timeout" || -z "$poll_interval_in_seconds" ]]; then
+ echo "Error: Missing arguments."
+ echo "Usage: kubectl_wait_for_query "
+ exit 1
+ fi
+
+ start_time=$(date +%s)
+ while true; do
+ val=$(kubectl get "${manifest}" -o jsonpath="${query}" 2>/dev/null || echo "")
+ if [[ -n "${val}" ]]; then
+ echo "${manifest} has ${query}."
+ break
+ fi
+ if [[ $(( $(date +%s) - start_time )) -ge ${timeout} ]]; then
+ echo "Timed out waiting for ${manifest} to have ${query}."
+ exit 1
+ fi
+ sleep ${poll_interval_in_seconds}s
+ done
+}
+
kubectl apply -f "https://github.com/cert-manager/cert-manager/releases/download/${cert_mgr_version}/cert-manager.yaml"
# Wait for cert-manager to be fully ready
kubectl_wait "cert-manager" "deployment/cert-manager-webhook" "60s"
kubectl_wait "cert-manager" "deployment/cert-manager-cainjector" "60s"
kubectl_wait "cert-manager" "deployment/cert-manager" "60s"
-kubectl wait mutatingwebhookconfigurations/cert-manager-webhook --for=jsonpath='{.webhooks[0].clientConfig.caBundle}' --timeout=60s
-kubectl wait validatingwebhookconfigurations/cert-manager-webhook --for=jsonpath='{.webhooks[0].clientConfig.caBundle}' --timeout=60s
+kubectl_wait_for_query "mutatingwebhookconfigurations/cert-manager-webhook" '{.webhooks[0].clientConfig.caBundle}' 60 5
+kubectl_wait_for_query "validatingwebhookconfigurations/cert-manager-webhook" '{.webhooks[0].clientConfig.caBundle}' 60 5
kubectl apply -f "https://github.com/operator-framework/catalogd/releases/download/${catalogd_version}/catalogd.yaml"
# Wait for the rollout, and then wait for the deployment to be Available
diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go
index d03aacf5e..cecb7f219 100644
--- a/test/e2e/cluster_extension_install_test.go
+++ b/test/e2e/cluster_extension_install_test.go
@@ -17,18 +17,20 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/utils/env"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
const (
@@ -38,6 +40,19 @@ const (
var pollDuration = time.Minute
var pollInterval = time.Second
+func createNamespace(ctx context.Context, name string) (*corev1.Namespace, error) {
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+ err := c.Create(ctx, ns)
+ if err != nil {
+ return nil, err
+ }
+ return ns, nil
+}
+
func createServiceAccount(ctx context.Context, name types.NamespacedName, clusterExtensionName string) (*corev1.ServiceAccount, error) {
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
@@ -177,44 +192,93 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core
return nil
}
-func testInit(t *testing.T) (*ocv1alpha1.ClusterExtension, *catalogd.ClusterCatalog, *corev1.ServiceAccount) {
+func testInit(t *testing.T) (*ocv1.ClusterExtension, *catalogd.ClusterCatalog, *corev1.ServiceAccount, *corev1.Namespace) {
var err error
- extensionCatalog, err := createTestCatalog(context.Background(), testCatalogName, os.Getenv(testCatalogRefEnvVar))
- require.NoError(t, err)
clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+
+ ns, err := createNamespace(context.Background(), clusterExtensionName)
+ require.NoError(t, err)
+
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: clusterExtensionName,
},
}
- defaultNamespace := types.NamespacedName{
+ extensionCatalog, err := createTestCatalog(context.Background(), testCatalogName, os.Getenv(testCatalogRefEnvVar))
+ require.NoError(t, err)
+
+ name := types.NamespacedName{
Name: clusterExtensionName,
- Namespace: "default",
+ Namespace: ns.GetName(),
}
- sa, err := createServiceAccount(context.Background(), defaultNamespace, clusterExtensionName)
+ sa, err := createServiceAccount(context.Background(), name, clusterExtensionName)
require.NoError(t, err)
- return clusterExtension, extensionCatalog, sa
+ return clusterExtension, extensionCatalog, sa, ns
}
-func testCleanup(t *testing.T, cat *catalogd.ClusterCatalog, clusterExtension *ocv1alpha1.ClusterExtension, sa *corev1.ServiceAccount) {
+func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) {
+ ls := labels.Set{"olm.operatorframework.io/owner-name": clusterExtensionName}
+
+ // CRDs may take an extra long time to be deleted, and may run into the following error:
+ // Condition=Terminating Status=True Reason=InstanceDeletionFailed Message="could not list instances: storage is (re)initializing"
+ t.Logf("By waiting for CustomResourceDefinitions of %q to be deleted", clusterExtensionName)
+ require.EventuallyWithT(t, func(ct *assert.CollectT) {
+ list := &apiextensionsv1.CustomResourceDefinitionList{}
+ err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()})
+ assert.NoError(ct, err)
+ assert.Empty(ct, list.Items)
+ }, 5*pollDuration, pollInterval)
+
+ t.Logf("By waiting for ClusterRoleBindings of %q to be deleted", clusterExtensionName)
+ require.EventuallyWithT(t, func(ct *assert.CollectT) {
+ list := &rbacv1.ClusterRoleBindingList{}
+ err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()})
+ assert.NoError(ct, err)
+ assert.Empty(ct, list.Items)
+ }, 2*pollDuration, pollInterval)
+
+ t.Logf("By waiting for ClusterRoles of %q to be deleted", clusterExtensionName)
+ require.EventuallyWithT(t, func(ct *assert.CollectT) {
+ list := &rbacv1.ClusterRoleList{}
+ err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()})
+ assert.NoError(ct, err)
+ assert.Empty(ct, list.Items)
+ }, 2*pollDuration, pollInterval)
+}
+
+func testCleanup(t *testing.T, cat *catalogd.ClusterCatalog, clusterExtension *ocv1.ClusterExtension, sa *corev1.ServiceAccount, ns *corev1.Namespace) {
+ t.Logf("By deleting ClusterCatalog %q", cat.Name)
require.NoError(t, c.Delete(context.Background(), cat))
require.Eventually(t, func() bool {
err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &catalogd.ClusterCatalog{})
return errors.IsNotFound(err)
}, pollDuration, pollInterval)
+
+ t.Logf("By deleting ClusterExtension %q", clusterExtension.Name)
require.NoError(t, c.Delete(context.Background(), clusterExtension))
require.Eventually(t, func() bool {
- err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1alpha1.ClusterExtension{})
+ err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{})
return errors.IsNotFound(err)
}, pollDuration, pollInterval)
+
+ t.Logf("By deleting ServiceAccount %q", sa.Name)
require.NoError(t, c.Delete(context.Background(), sa))
require.Eventually(t, func() bool {
err := c.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{})
return errors.IsNotFound(err)
}, pollDuration, pollInterval)
+
+ ensureNoExtensionResources(t, clusterExtension.Name)
+
+ t.Logf("By deleting Namespace %q", ns.Name)
+ require.NoError(t, c.Delete(context.Background(), ns))
+ require.Eventually(t, func() bool {
+ err := c.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{})
+ return errors.IsNotFound(err)
+ }, pollDuration, pollInterval)
}
func TestClusterExtensionInstallRegistry(t *testing.T) {
@@ -240,25 +304,23 @@ func TestClusterExtensionInstallRegistry(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
t.Log("When the extension bundle format is registry+v1")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: tc.packageName,
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
t.Log("It resolves the specified package with correct bundle path")
@@ -270,23 +332,23 @@ func TestClusterExtensionInstallRegistry(t *testing.T) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
}, pollDuration, pollInterval)
- t.Log("By eventually reporting no longer progressing")
+ t.Log("By eventually reporting progressing as True")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
t.Log("By eventually installing the package successfully")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
if assert.NotNil(ct, cond) {
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
assert.Contains(ct, cond.Message, "Installed bundle")
assert.NotEmpty(ct, clusterExtension.Status.Install.Bundle)
}
@@ -298,22 +360,20 @@ func TestClusterExtensionInstallRegistry(t *testing.T) {
func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
t.Log("It resolves to multiple bundle paths")
@@ -328,10 +388,10 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) {
t.Log("By eventually reporting Progressing == True and Reason Retrying")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonRetrying, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
assert.Contains(ct, cond.Message, "in multiple catalogs with the same priority [operatorhubio test-catalog]")
}
}, pollDuration, pollInterval)
@@ -341,25 +401,23 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
t.Log("When resolving upgrade edges")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
Version: "1.0.0",
// No Selector since this is an exact version match
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
require.NoError(t, c.Create(context.Background(), clusterExtension))
@@ -367,17 +425,17 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) {
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
assert.Equal(ct,
- &ocv1alpha1.ClusterExtensionInstallStatus{Bundle: ocv1alpha1.BundleMetadata{
+ &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{
Name: "prometheus-operator.1.0.0",
Version: "1.0.0",
}},
clusterExtension.Status.Install,
)
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
@@ -394,9 +452,9 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) {
t.Log("By eventually reporting Progressing == True and Reason Retrying")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, ocv1alpha1.ReasonRetrying, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
assert.Equal(ct, "error upgrading from currently installed version \"1.0.0\": no bundles found for package \"prometheus\" matching version \"1.2.0\"", cond.Message)
}
}, pollDuration, pollInterval)
@@ -406,34 +464,32 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
t.Log("When resolving upgrade edges")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
Version: "1.0.0",
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
require.NoError(t, c.Create(context.Background(), clusterExtension))
t.Log("By eventually reporting a successful resolution")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
@@ -441,15 +497,15 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) {
t.Log("By updating the ClusterExtension resource to a non-successor version")
// 1.2.0 does not replace/skip/skipRange 1.0.0.
clusterExtension.Spec.Source.Catalog.Version = "1.2.0"
- clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1alpha1.UpgradeConstraintPolicySelfCertified
+ clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified
require.NoError(t, c.Update(context.Background(), clusterExtension))
t.Log("By eventually reporting a satisfiable resolution")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
}
@@ -457,34 +513,32 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) {
func TestClusterExtensionInstallSuccessorVersion(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
t.Log("When resolving upgrade edges")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
t.Log("By creating an ClusterExtension at a specified version")
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
Version: "1.0.0",
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
require.NoError(t, c.Create(context.Background(), clusterExtension))
t.Log("By eventually reporting a successful resolution")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
@@ -496,10 +550,10 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) {
t.Log("By eventually reporting a successful resolution and bundle path")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
}
@@ -507,16 +561,16 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) {
func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
t.Log("It resolves again when a catalog is patched with new ImageRef")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "olm.operatorframework.io/metadata.name",
@@ -527,11 +581,9 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) {
},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
t.Log("It resolves the specified package with correct bundle path")
@@ -541,16 +593,16 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) {
t.Log("By reporting a successful resolution and bundle path")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
// patch imageRef tag on test-catalog image with v2 image
t.Log("By patching the catalog ImageRef to point to the v2 catalog")
- updatedCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:v2", os.Getenv("LOCAL_REGISTRY_HOST"))
+ updatedCatalogImage := fmt.Sprintf("%s/test-catalog:v2", os.Getenv("LOCAL_REGISTRY_HOST"))
err := patchTestCatalog(context.Background(), testCatalogName, updatedCatalogImage)
require.NoError(t, err)
require.EventuallyWithT(t, func(ct *assert.CollectT) {
@@ -565,10 +617,10 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) {
t.Log("By eventually reporting a successful resolution and bundle path")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
}
@@ -588,31 +640,31 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) {
extensionCatalog, err := createTestCatalog(context.Background(), testCatalogName, latestCatalogImage)
require.NoError(t, err)
clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: clusterExtensionName,
},
}
- sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: "default"}, clusterExtensionName)
+ ns, err := createNamespace(context.Background(), clusterExtensionName)
require.NoError(t, err)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName)
+ require.NoError(t, err)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
t.Log("It resolves the specified package with correct bundle path")
@@ -622,10 +674,10 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) {
t.Log("By reporting a successful resolution and bundle path")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
@@ -646,10 +698,10 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) {
t.Log("By eventually reporting a successful resolution and bundle path")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
}
@@ -657,25 +709,23 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) {
func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T) {
t.Log("When a cluster extension is installed from a catalog")
t.Log("It resolves again when managed content is changed")
- clusterExtension, extensionCatalog, sa := testInit(t)
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ clusterExtension, extensionCatalog, sa, ns := testInit(t)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
t.Log("It installs the specified package with correct bundle path")
@@ -685,10 +735,10 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T
t.Log("By reporting a successful installation")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
if assert.NotNil(ct, cond) {
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
assert.Contains(ct, cond.Message, "Installed bundle")
}
}, pollDuration, pollInterval)
@@ -697,7 +747,7 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T
prometheusService := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "prometheus-operator",
- Namespace: clusterExtension.Spec.Install.Namespace,
+ Namespace: clusterExtension.Spec.Namespace,
},
}
require.NoError(t, c.Delete(context.Background(), prometheusService))
@@ -712,35 +762,33 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes
t.Log("When a cluster extension is installed from a catalog")
t.Log("When the extension bundle format is registry+v1")
- clusterExtension, extensionCatalog, _ := testInit(t)
+ clusterExtension, extensionCatalog, _, ns := testInit(t)
+
name := rand.String(10)
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: name,
- Namespace: "default",
+ Namespace: ns.Name,
},
}
err := c.Create(context.Background(), sa)
require.NoError(t, err)
-
- defer testCleanup(t, extensionCatalog, clusterExtension, sa)
+ defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
defer getArtifactsOutput(t)
- clusterExtension.Spec = ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ clusterExtension.Spec = ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: "prometheus",
- Selector: metav1.LabelSelector{
+ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name},
},
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: "default",
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: ns.Name,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
}
t.Log("It resolves the specified package with correct bundle path")
@@ -755,20 +803,20 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes
t.Log("By eventually reporting Progressing == True with Reason Retrying")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonRetrying, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonRetrying, cond.Reason)
}
}, pollDuration, pollInterval)
t.Log("By eventually failing to install the package successfully due to insufficient ServiceAccount permissions")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
if assert.NotNil(ct, cond) {
assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonFailed, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonFailed, cond.Reason)
assert.Equal(ct, "No bundle installed", cond.Message)
}
}, pollDuration, pollInterval)
@@ -783,22 +831,22 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes
t.Log("By eventually installing the package successfully")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
if assert.NotNil(ct, cond) {
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
assert.Contains(ct, cond.Message, "Installed bundle")
assert.NotEmpty(ct, clusterExtension.Status.Install)
}
}, pollDuration, pollInterval)
- t.Log("By eventually reporting Progressing == False with Reason Success")
+ t.Log("By eventually reporting Progressing == True with Reason Success")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeProgressing)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing)
if assert.NotNil(ct, cond) {
- assert.Equal(ct, metav1.ConditionFalse, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, metav1.ConditionTrue, cond.Status)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}
}, pollDuration, pollInterval)
}
@@ -834,7 +882,7 @@ func getArtifactsOutput(t *testing.T) {
}
// get all cluster extensions save them to the artifact path.
- clusterExtensions := ocv1alpha1.ClusterExtensionList{}
+ clusterExtensions := ocv1.ClusterExtensionList{}
if err := c.List(context.Background(), &clusterExtensions, client.InNamespace("")); err != nil {
fmt.Printf("Failed to list cluster extensions: %v", err)
}
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index b292c4653..df6d3fdd9 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -4,15 +4,16 @@ import (
"context"
"os"
"testing"
- "time"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/rest"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/scheme"
)
@@ -32,6 +33,7 @@ func TestMain(m *testing.M) {
cfg = ctrl.GetConfigOrDie()
var err error
+ utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme))
c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
utilruntime.Must(err)
@@ -53,8 +55,8 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*cata
Source: catalogd.CatalogSource{
Type: catalogd.SourceTypeImage,
Image: &catalogd.ImageSource{
- Ref: imageRef,
- PollInterval: &metav1.Duration{Duration: time.Second},
+ Ref: imageRef,
+ PollIntervalMinutes: ptr.To(1),
},
},
},
diff --git a/test/extension-developer-e2e/extension_developer_test.go b/test/extension-developer-e2e/extension_developer_test.go
index 4a11a7530..6974983a0 100644
--- a/test/extension-developer-e2e/extension_developer_test.go
+++ b/test/extension-developer-e2e/extension_developer_test.go
@@ -18,9 +18,9 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
func TestExtensionDeveloper(t *testing.T) {
@@ -30,7 +30,7 @@ func TestExtensionDeveloper(t *testing.T) {
scheme := runtime.NewScheme()
require.NoError(t, catalogd.AddToScheme(scheme))
- require.NoError(t, ocv1alpha1.AddToScheme(scheme))
+ require.NoError(t, ocv1.AddToScheme(scheme))
require.NoError(t, corev1.AddToScheme(scheme))
require.NoError(t, rbacv1.AddToScheme(scheme))
@@ -64,22 +64,20 @@ func TestExtensionDeveloper(t *testing.T) {
}
require.NoError(t, c.Create(ctx, sa))
- clusterExtension := &ocv1alpha1.ClusterExtension{
+ clusterExtension := &ocv1.ClusterExtension{
ObjectMeta: metav1.ObjectMeta{
Name: "registryv1",
},
- Spec: ocv1alpha1.ClusterExtensionSpec{
- Source: ocv1alpha1.SourceConfig{
+ Spec: ocv1.ClusterExtensionSpec{
+ Source: ocv1.SourceConfig{
SourceType: "Catalog",
- Catalog: &ocv1alpha1.CatalogSource{
+ Catalog: &ocv1.CatalogSource{
PackageName: os.Getenv("REG_PKG_NAME"),
},
},
- Install: ocv1alpha1.ClusterExtensionInstallConfig{
- Namespace: installNamespace,
- ServiceAccount: ocv1alpha1.ServiceAccountReference{
- Name: sa.Name,
- },
+ Namespace: installNamespace,
+ ServiceAccount: ocv1.ServiceAccountReference{
+ Name: sa.Name,
},
},
}
@@ -202,14 +200,14 @@ func TestExtensionDeveloper(t *testing.T) {
require.NoError(t, c.Create(context.Background(), clusterExtension))
t.Log("It should have a status condition type of Installed with a status of True and a reason of Success")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
- ext := &ocv1alpha1.ClusterExtension{}
+ ext := &ocv1.ClusterExtension{}
assert.NoError(ct, c.Get(context.Background(), client.ObjectKeyFromObject(clusterExtension), ext))
- cond := meta.FindStatusCondition(ext.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := meta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeInstalled)
if !assert.NotNil(ct, cond) {
return
}
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
}, 2*time.Minute, time.Second)
require.NoError(t, c.Delete(context.Background(), catalog))
require.NoError(t, c.Delete(context.Background(), clusterExtension))
diff --git a/test/upgrade-e2e/post_upgrade_test.go b/test/upgrade-e2e/post_upgrade_test.go
index 78f7284a8..ace977d13 100644
--- a/test/upgrade-e2e/post_upgrade_test.go
+++ b/test/upgrade-e2e/post_upgrade_test.go
@@ -18,9 +18,9 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogdv1alpha1 "github.com/operator-framework/catalogd/api/core/v1alpha1"
+ catalogd "github.com/operator-framework/catalogd/api/v1"
- ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
+ ocv1 "github.com/operator-framework/operator-controller/api/v1"
)
func TestClusterExtensionAfterOLMUpgrade(t *testing.T) {
@@ -65,26 +65,26 @@ func TestClusterExtensionAfterOLMUpgrade(t *testing.T) {
t.Log("Checking that the ClusterCatalog is serving")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
- var clusterCatalog catalogdv1alpha1.ClusterCatalog
+ var clusterCatalog catalogd.ClusterCatalog
assert.NoError(ct, c.Get(ctx, types.NamespacedName{Name: testClusterCatalogName}, &clusterCatalog))
- cond := apimeta.FindStatusCondition(clusterCatalog.Status.Conditions, catalogdv1alpha1.TypeServing)
+ cond := apimeta.FindStatusCondition(clusterCatalog.Status.Conditions, catalogd.TypeServing)
if !assert.NotNil(ct, cond) {
return
}
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, catalogdv1alpha1.ReasonAvailable, cond.Reason)
+ assert.Equal(ct, catalogd.ReasonAvailable, cond.Reason)
}, time.Minute, time.Second)
t.Log("Checking that the ClusterExtension is installed")
- var clusterExtension ocv1alpha1.ClusterExtension
+ var clusterExtension ocv1.ClusterExtension
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(ctx, types.NamespacedName{Name: testClusterExtensionName}, &clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
if !assert.NotNil(ct, cond) {
return
}
assert.Equal(ct, metav1.ConditionTrue, cond.Status)
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
assert.Contains(ct, cond.Message, "Installed bundle")
if assert.NotNil(ct, clusterExtension.Status.Install) {
assert.NotEmpty(ct, clusterExtension.Status.Install.Bundle.Version)
@@ -101,13 +101,13 @@ func TestClusterExtensionAfterOLMUpgrade(t *testing.T) {
t.Log("Checking that the ClusterExtension installs successfully")
require.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.NoError(ct, c.Get(ctx, types.NamespacedName{Name: testClusterExtensionName}, &clusterExtension))
- cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1alpha1.TypeInstalled)
+ cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled)
if !assert.NotNil(ct, cond) {
return
}
- assert.Equal(ct, ocv1alpha1.ReasonSucceeded, cond.Reason)
+ assert.Equal(ct, ocv1.ReasonSucceeded, cond.Reason)
assert.Contains(ct, cond.Message, "Installed bundle")
- assert.Equal(ct, ocv1alpha1.BundleMetadata{Name: "prometheus-operator.1.0.1", Version: "1.0.1"}, clusterExtension.Status.Install.Bundle)
+ assert.Equal(ct, ocv1.BundleMetadata{Name: "prometheus-operator.1.0.1", Version: "1.0.1"}, clusterExtension.Status.Install.Bundle)
assert.NotEqual(ct, previousVersion, clusterExtension.Status.Install.Bundle.Version)
}, time.Minute, time.Second)
}
diff --git a/testdata/.gitignore b/testdata/.gitignore
new file mode 100644
index 000000000..1eca1dc7e
--- /dev/null
+++ b/testdata/.gitignore
@@ -0,0 +1,2 @@
+push/bin
+registry/bin
diff --git a/testdata/Dockerfile b/testdata/Dockerfile
new file mode 100644
index 000000000..0f1355f56
--- /dev/null
+++ b/testdata/Dockerfile
@@ -0,0 +1,12 @@
+FROM gcr.io/distroless/static:nonroot
+
+WORKDIR /
+
+COPY registry/bin/registry registry
+COPY push/bin/push push
+
+COPY images images
+
+EXPOSE 5000
+
+USER 65532:65532
diff --git a/hack/test/image-registry.sh b/testdata/build-test-registry.sh
similarity index 61%
rename from hack/test/image-registry.sh
rename to testdata/build-test-registry.sh
index ac3e68eb7..3ea0e65b0 100755
--- a/hack/test/image-registry.sh
+++ b/testdata/build-test-registry.sh
@@ -1,22 +1,21 @@
-#! /bin/bash
+#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
help="
-image-registry.sh is a script to stand up an image registry within a cluster.
+build-test-registry.sh is a script to stand up an image registry within a cluster and push the test images to it.
Usage:
- image-registry.sh [NAMESPACE] [NAME] [CERT_REF]
+ build-test-registry.sh [NAMESPACE] [NAME] [IMAGE]
Argument Descriptions:
- NAMESPACE is the namespace that should be created and is the namespace in which the image registry will be created
- NAME is the name that should be used for the image registry Deployment and Service
- - CERT_REF is the reference to the CA certificate that should be used to serve the image registry over HTTPS, in the
- format of 'Issuer/' or 'ClusterIssuer/'
+ - IMAGE is the name of the image that should be used to run the image registry
"
-if [[ "$#" -ne 2 ]]; then
+if [[ "$#" -ne 3 ]]; then
echo "Illegal number of arguments passed"
echo "${help}"
exit 1
@@ -24,6 +23,7 @@ fi
namespace=$1
name=$2
+image=$3
kubectl apply -f - << EOF
apiVersion: v1
@@ -69,7 +69,12 @@ spec:
spec:
containers:
- name: registry
- image: registry:2
+ image: ${image}
+ imagePullPolicy: IfNotPresent
+ command:
+ - /registry
+ args:
+ - "--registry-address=:5000"
volumeMounts:
- name: certs-vol
mountPath: "/certs"
@@ -100,3 +105,35 @@ spec:
EOF
kubectl wait --for=condition=Available -n "${namespace}" "deploy/${name}" --timeout=60s
+
+kubectl apply -f - << EOF
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: ${name}-push
+ namespace: "${namespace}"
+spec:
+ template:
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: push
+ image: ${image}
+ command:
+ - /push
+ args:
+ - "--registry-address=${name}.${namespace}.svc:5000"
+ - "--images-path=/images"
+ volumeMounts:
+ - name: certs-vol
+ mountPath: "/certs"
+ env:
+ - name: SSL_CERT_DIR
+ value: "/certs/"
+ volumes:
+ - name: certs-vol
+ secret:
+ secretName: ${namespace}-registry
+EOF
+
+kubectl wait --for=condition=Complete -n "${namespace}" "job/${name}-push" --timeout=60s
diff --git a/testdata/bundles/registry-v1/build-push-e2e-bundle.sh b/testdata/bundles/registry-v1/build-push-e2e-bundle.sh
deleted file mode 100755
index 0aec13cc9..000000000
--- a/testdata/bundles/registry-v1/build-push-e2e-bundle.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#! /bin/bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-help="
-build-push-e2e-bundle.sh is a script to build and push the e2e bundle image using kaniko.
-Usage:
- build-push-e2e-bundle.sh [NAMESPACE] [TAG] [BUNDLE_NAME] [BUNDLE_DIR]
-
-Argument Descriptions:
- - NAMESPACE is the namespace the kaniko Job should be created in
- - TAG is the full tag used to build and push the catalog image
-"
-
-if [[ "$#" -ne 4 ]]; then
- echo "Illegal number of arguments passed"
- echo "${help}"
- exit 1
-fi
-
-
-namespace=$1
-tag=$2
-bundle_name=$3
-package_name=$4
-bundle_dir="testdata/bundles/registry-v1/${package_name}"
-
-echo "${namespace}" "${tag}"
-
-kubectl create configmap -n "${namespace}" --from-file="${bundle_dir}/Dockerfile" operator-controller-e2e-${bundle_name}.root
-
-tgz="${bundle_dir}/manifests.tgz"
-tar czf "${tgz}" -C "${bundle_dir}/" manifests metadata
-kubectl create configmap -n "${namespace}" --from-file="${tgz}" operator-controller-${bundle_name}.manifests
-rm "${tgz}"
-
-# Remove periods from bundle name due to pod name issues
-job_name=${bundle_name//.}
-
-kubectl apply -f - << EOF
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: "kaniko-${job_name}"
- namespace: "${namespace}"
-spec:
- template:
- spec:
- initContainers:
- - name: copy-manifests
- image: busybox
- command: ['sh', '-c', 'cp /manifests-data/* /manifests']
- volumeMounts:
- - name: manifests
- mountPath: /manifests
- - name: manifests-data
- mountPath: /manifests-data
- containers:
- - name: kaniko
- image: gcr.io/kaniko-project/executor:latest
- args: ["--dockerfile=/workspace/Dockerfile",
- "--context=tar:///workspace/manifests/manifests.tgz",
- "--destination=${tag}",
- "--skip-tls-verify"]
- volumeMounts:
- - name: dockerfile
- mountPath: /workspace/
- - name: manifests
- mountPath: /workspace/manifests/
- restartPolicy: Never
- volumes:
- - name: dockerfile
- configMap:
- name: operator-controller-e2e-${bundle_name}.root
- - name: manifests
- emptyDir: {}
- - name: manifests-data
- configMap:
- name: operator-controller-${bundle_name}.manifests
-EOF
-
-kubectl wait --for=condition=Complete -n "${namespace}" jobs/kaniko-${job_name} --timeout=60s
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/Dockerfile b/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/Dockerfile
deleted file mode 100644
index 5a1458148..000000000
--- a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM scratch
-
-# Core bundle labels.
-LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1
-LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/
-LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/
-LABEL operators.operatorframework.io.bundle.package.v1=prometheusoperator
-LABEL operators.operatorframework.io.bundle.channels.v1=beta
-LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.28.0
-LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
-LABEL operators.operatorframework.io.metrics.project_layout=unknown
-
-# Copy files to locations specified by labels.
-COPY manifests /manifests/
-COPY metadata /metadata/
diff --git a/testdata/catalogs/test-catalog-v1.Dockerfile b/testdata/catalogs/test-catalog-v1.Dockerfile
deleted file mode 100644
index d255e0774..000000000
--- a/testdata/catalogs/test-catalog-v1.Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM scratch
-ADD test-catalog-v1 /configs
-
-# Set DC-specific label for the location of the DC root directory
-# in the image
-LABEL operators.operatorframework.io.index.configs.v1=/configs
diff --git a/testdata/catalogs/test-catalog-v2.Dockerfile b/testdata/catalogs/test-catalog-v2.Dockerfile
deleted file mode 100644
index 72b3a7a2e..000000000
--- a/testdata/catalogs/test-catalog-v2.Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM scratch
-ADD test-catalog-v2 /configs
-
-# Set DC-specific label for the location of the DC root directory
-# in the image
-LABEL operators.operatorframework.io.index.configs.v1=/configs
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_alertmanagerconfigs.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_alertmanagerconfigs.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_alertmanagerconfigs.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_alertmanagerconfigs.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_alertmanagers.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_alertmanagers.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_alertmanagers.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_alertmanagers.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_podmonitors.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_podmonitors.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_podmonitors.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_podmonitors.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_probes.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_probes.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_probes.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_probes.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_prometheusagents.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_prometheusagents.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_prometheusagents.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_prometheusagents.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_prometheuses.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_prometheuses.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_prometheuses.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_prometheuses.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_prometheusrules.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_prometheusrules.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_prometheusrules.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_prometheusrules.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_scrapeconfigs.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_scrapeconfigs.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_scrapeconfigs.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_scrapeconfigs.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_servicemonitors.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_servicemonitors.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_servicemonitors.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_servicemonitors.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_thanosrulers.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_thanosrulers.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/monitoring.coreos.com_thanosrulers.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/monitoring.coreos.com_thanosrulers.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/prometheus-operator_v1_service.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/prometheus-operator_v1_service.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/prometheus-operator_v1_service.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/prometheus-operator_v1_service.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/prometheusoperator.clusterserviceversion.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/manifests/prometheusoperator.clusterserviceversion.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/manifests/prometheusoperator.clusterserviceversion.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/manifests/prometheusoperator.clusterserviceversion.yaml
diff --git a/testdata/bundles/registry-v1/prometheus-operator.v1.0.0/metadata/annotations.yaml b/testdata/images/bundles/prometheus-operator/v1.0.0/metadata/annotations.yaml
similarity index 100%
rename from testdata/bundles/registry-v1/prometheus-operator.v1.0.0/metadata/annotations.yaml
rename to testdata/images/bundles/prometheus-operator/v1.0.0/metadata/annotations.yaml
diff --git a/testdata/catalogs/test-catalog-v1/.indexignore b/testdata/images/catalogs/test-catalog/v1/configs/.indexignore
similarity index 100%
rename from testdata/catalogs/test-catalog-v1/.indexignore
rename to testdata/images/catalogs/test-catalog/v1/configs/.indexignore
diff --git a/testdata/catalogs/test-catalog-v1/catalog.yaml b/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml
similarity index 90%
rename from testdata/catalogs/test-catalog-v1/catalog.yaml
rename to testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml
index 41c843d72..ac1613ad1 100644
--- a/testdata/catalogs/test-catalog-v1/catalog.yaml
+++ b/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml
@@ -32,7 +32,7 @@ properties:
schema: olm.bundle
name: prometheus-operator.1.0.1
package: prometheus
-image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.0.1
+image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.0.0
properties:
- type: olm.package
value:
@@ -42,7 +42,7 @@ properties:
schema: olm.bundle
name: prometheus-operator.1.2.0
package: prometheus
-image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.2.0
+image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.0.0
properties:
- type: olm.package
value:
@@ -63,7 +63,7 @@ entries:
schema: olm.bundle
name: prometheus-mirrored-operator.1.2.0
package: prometheus-mirrored
-image: mirrored-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.2.0
+image: mirrored-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.0.0
properties:
- type: olm.package
value:
diff --git a/testdata/catalogs/test-catalog-v2/.indexignore b/testdata/images/catalogs/test-catalog/v2/configs/.indexignore
similarity index 100%
rename from testdata/catalogs/test-catalog-v2/.indexignore
rename to testdata/images/catalogs/test-catalog/v2/configs/.indexignore
diff --git a/testdata/catalogs/test-catalog-v2/catalog.yaml b/testdata/images/catalogs/test-catalog/v2/configs/catalog.yaml
similarity index 89%
rename from testdata/catalogs/test-catalog-v2/catalog.yaml
rename to testdata/images/catalogs/test-catalog/v2/configs/catalog.yaml
index 7208809cc..779d5cc4f 100644
--- a/testdata/catalogs/test-catalog-v2/catalog.yaml
+++ b/testdata/images/catalogs/test-catalog/v2/configs/catalog.yaml
@@ -13,7 +13,7 @@ entries:
schema: olm.bundle
name: prometheus-operator.2.0.0
package: prometheus
-image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v2.0.0
+image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/prometheus-operator:v1.0.0
properties:
- type: olm.package
value:
diff --git a/testdata/push/README.md b/testdata/push/README.md
new file mode 100644
index 000000000..f45f0cc46
--- /dev/null
+++ b/testdata/push/README.md
@@ -0,0 +1,46 @@
+# Test Registry Image Push
+
+This tool builds and pushes our test bundle and catalog images via crane. It accepts two command-line flags:
+```
+Usage of push:
+ --images-path string Image directory path (default "/images")
+ --registry-address string The address of the registry. (default ":12345")
+```
+
+`--registry-address` is the address of the registry that the images are pushed to.
+
+`--images-path` should point to the root directory of the images tree. The tool expects a specific layout: bundles are placed under `/bundles` and catalogs under `/catalogs`. Both directories follow the same convention: each folder inside them is an image name (e.g. `test-catalog`), and each folder inside an image folder is a tag of that image. The resulting tree looks like this:
+```bash
+$ tree ./testdata/images/
+./testdata/images/
+├── bundles
+│   └── prometheus-operator
+│       └── v1.0.0
+│           ├── metadata
+│           │   └── annotations.yaml
+│           └── manifests
+│               └── example.yaml
+└── catalogs
+    └── test-catalog
+        ├── v1
+        │   └── configs
+        │       └── catalog.yaml
+        └── v2
+            └── configs
+                └── catalog.yaml
+```
+The contents of each tag folder are copied directly into `/` of the built image, e.g. `test-catalog:v1` will contain `/configs/catalog.yaml`.
+
+To add a new image or tag, create the required folders and populate them with the files to be included in the image. Bundle images that need metadata should contain a `metadata` folder with an `annotations.yaml` file. Example content:
+```yaml
+annotations:
+ # Core bundle annotations.
+ operators.operatorframework.io.bundle.mediatype.v1: registry+v1
+ operators.operatorframework.io.bundle.manifests.v1: manifests/
+ operators.operatorframework.io.bundle.metadata.v1: metadata/
+ operators.operatorframework.io.bundle.package.v1: prometheus
+ operators.operatorframework.io.bundle.channels.v1: beta
+ operators.operatorframework.io.metrics.builder: operator-sdk-v1.28.0
+ operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
+ operators.operatorframework.io.metrics.project_layout: unknown
+```
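+
+For orientation, the flow the tool implements can be summarized as: walk a tag directory into a filemap, build a single-layer image from it, and push it to the registry. Below is a minimal, hypothetical sketch using `go-containerregistry`; it is not the exact code in `push.go`, and the function and reference names are illustrative.
+
+```go
+package main
+
+import (
+	"io/fs"
+	"log"
+	"os"
+	"path/filepath"
+
+	"github.com/google/go-containerregistry/pkg/crane"
+)
+
+// pushTagDir reads every file under tagDir into a filemap keyed by the
+// path relative to tagDir, builds a single-layer image from it, and
+// pushes that image to ref.
+func pushTagDir(tagDir, ref string) error {
+	filemap := map[string][]byte{}
+	err := filepath.WalkDir(tagDir, func(path string, d fs.DirEntry, err error) error {
+		if err != nil || d.IsDir() {
+			return err
+		}
+		data, err := os.ReadFile(path)
+		if err != nil {
+			return err
+		}
+		rel, err := filepath.Rel(tagDir, path)
+		if err != nil {
+			return err
+		}
+		// Relative keys mean the files land under "/" of the built image.
+		filemap[rel] = data
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	img, err := crane.Image(filemap)
+	if err != nil {
+		return err
+	}
+	return crane.Push(img, ref)
+}
+
+func main() {
+	// Illustrative values; the real tool derives these from its flags.
+	if err := pushTagDir("testdata/images/catalogs/test-catalog/v1", "localhost:5000/test-catalog:v1"); err != nil {
+		log.Fatal(err)
+	}
+}
+```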
diff --git a/testdata/push/go.mod b/testdata/push/go.mod
new file mode 100644
index 000000000..72bb1b9e8
--- /dev/null
+++ b/testdata/push/go.mod
@@ -0,0 +1,26 @@
+module registry
+
+go 1.22.5
+
+require (
+ github.com/google/go-containerregistry v0.20.2
+ github.com/spf13/pflag v1.0.5
+ gopkg.in/yaml.v2 v2.4.0
+)
+
+require (
+ github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
+ github.com/docker/cli v27.3.1+incompatible // indirect
+ github.com/docker/distribution v2.8.3+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.8.2 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/vbatts/tar-split v0.11.6 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.26.0 // indirect
+)
diff --git a/testdata/push/go.sum b/testdata/push/go.sum
new file mode 100644
index 000000000..6f7cce0f8
--- /dev/null
+++ b/testdata/push/go.sum
@@ -0,0 +1,60 @@
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
+github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
+github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
diff --git a/testdata/push/push.go b/testdata/push/push.go
new file mode 100644
index 000000000..72989b1dc
--- /dev/null
+++ b/testdata/push/push.go
@@ -0,0 +1,178 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/fs"
+ "log"
+ "os"
+ "strings"
+
+ "github.com/google/go-containerregistry/pkg/crane"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/mutate"
+ "github.com/spf13/pflag"
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ bundlesSubPath string = "bundles"
+ catalogsSubPath string = "catalogs"
+)
+
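+// main builds the bundle and catalog images found under the images path and
+// pushes each of them to the registry at the configured address.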
+func main() {
+ var (
+ registryAddr string
+ imagesPath string
+ )
+	flag.StringVar(&registryAddr, "registry-address", ":12345", "The address the registry binds to.")
+ flag.StringVar(&imagesPath, "images-path", "/images", "Image directory path")
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+
+ log.Printf("push operation configured with images path %s and destination %s", imagesPath, registryAddr)
+
+ bundlesFullPath := fmt.Sprintf("%s/%s", imagesPath, bundlesSubPath)
+ catalogsFullPath := fmt.Sprintf("%s/%s", imagesPath, catalogsSubPath)
+
+ bundles, err := buildBundles(bundlesFullPath)
+ if err != nil {
+ log.Fatalf("failed to build bundles: %s", err.Error())
+ }
+ catalogs, err := buildCatalogs(catalogsFullPath)
+ if err != nil {
+ log.Fatalf("failed to build catalogs: %s", err.Error())
+ }
+ // Push the images
+ for name, image := range bundles {
+ if err := crane.Push(image, fmt.Sprintf("%s/%s", registryAddr, name)); err != nil {
+ log.Fatalf("failed to push bundle images: %s", err.Error())
+ }
+ }
+ for name, image := range catalogs {
+ if err := crane.Push(image, fmt.Sprintf("%s/%s", registryAddr, name)); err != nil {
+ log.Fatalf("failed to push catalog images: %s", err.Error())
+ }
+ }
+ log.Printf("finished")
+ os.Exit(0)
+}
+
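+// buildBundles builds an image for every bundle directory under path and
+// applies the bundle's annotations.yaml metadata as image labels. Images are
+// keyed as "bundles/registry-v1/<name>:<tag>".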
+func buildBundles(path string) (map[string]v1.Image, error) {
+ bundles, err := processImageDirTree(path)
+ if err != nil {
+ return nil, err
+ }
+ mutatedMap := make(map[string]v1.Image, 0)
+ // Apply required bundle labels
+ for key, img := range bundles {
+ // Replace ':' between image name and image tag for file path
+ metadataPath := strings.Replace(key, ":", "/", 1)
+ labels, err := getBundleLabels(fmt.Sprintf("%s/%s/%s", path, metadataPath, "metadata/annotations.yaml"))
+ if err != nil {
+ return nil, err
+ }
+ mutatedMap[fmt.Sprintf("bundles/registry-v1/%s", key)], err = mutate.Config(img, v1.Config{Labels: labels})
+ if err != nil {
+ return nil, fmt.Errorf("failed to apply image labels: %w", err)
+ }
+ }
+ return mutatedMap, nil
+}
+
+type bundleAnnotations struct {
+	Annotations map[string]string `yaml:"annotations"`
+}
+
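+// getBundleLabels reads a bundle's metadata/annotations.yaml file and returns
+// its annotations, which are applied to the bundle image as labels.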
+func getBundleLabels(path string) (map[string]string, error) {
+ var metadata bundleAnnotations
+ yamlFile, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+	err = yaml.Unmarshal(yamlFile, &metadata)
+ if err != nil {
+ return nil, err
+ }
+	return metadata.Annotations, nil
+}
+
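+// buildCatalogs builds an image for every catalog directory under path and
+// applies the label that points consumers at the /configs directory of
+// file-based catalog data. Images are keyed as "e2e/<name>:<tag>".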
+func buildCatalogs(path string) (map[string]v1.Image, error) {
+ catalogs, err := processImageDirTree(path)
+ if err != nil {
+ return nil, err
+ }
+ mutatedMap := make(map[string]v1.Image, 0)
+ // Apply required catalog label
+ for key, img := range catalogs {
+ cfg := v1.Config{
+ Labels: map[string]string{
+ "operators.operatorframework.io.index.configs.v1": "/configs",
+ },
+ }
+ mutatedMap[fmt.Sprintf("e2e/%s", key)], err = mutate.Config(img, cfg)
+ if err != nil {
+ return nil, fmt.Errorf("failed to apply image labels: %w", err)
+ }
+ }
+ return mutatedMap, nil
+}
+
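+// processImageDirTree treats every <image>/<tag> directory pair under path as
+// a separate image and returns the built images keyed as "<image>:<tag>".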
+func processImageDirTree(path string) (map[string]v1.Image, error) {
+ imageMap := make(map[string]v1.Image, 0)
+ images, err := os.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Each directory in 'path' represents an image
+ for _, entry := range images {
+ entryFullPath := fmt.Sprintf("%s/%s", path, entry.Name())
+ if !entry.IsDir() {
+ continue
+ }
+ tags, err := os.ReadDir(entryFullPath)
+ if err != nil {
+ return nil, err
+ }
+ // Each directory in the image directory represents a separate tag
+ for _, tag := range tags {
+ if !tag.IsDir() {
+ continue
+ }
+ tagFullPath := fmt.Sprintf("%s/%s", entryFullPath, tag.Name())
+
+ var fileMap map[string][]byte
+ fileMap, err = createFileMap(tagFullPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read files for image: %w", err)
+ }
+
+ image, err := crane.Image(fileMap)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate image: %w", err)
+ }
+ imageMap[fmt.Sprintf("%s:%s", entry.Name(), tag.Name())] = image
+ }
+ }
+ return imageMap, nil
+}
+
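+// createFileMap walks the tree rooted at originPath and returns a map of
+// relative file paths to file contents, in the form expected by crane.Image.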
+func createFileMap(originPath string) (map[string][]byte, error) {
+ fileMap := make(map[string][]byte)
+ if err := fs.WalkDir(os.DirFS(originPath), ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if d != nil && !d.IsDir() {
+ fileMap[path], err = os.ReadFile(fmt.Sprintf("%s/%s", originPath, path))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return fileMap, nil
+}
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
new file mode 100644
index 000000000..6aba0ef1f
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -0,0 +1,689 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/sync/errgroup"
+)
+
+type options struct {
+ chunkSize int
+ compressionLevel int
+ prioritizedFiles []string
+ missedPrioritizedFiles *[]string
+ compression Compression
+ ctx context.Context
+ minChunkSize int
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+ return func(o *options) error {
+ o.chunkSize = chunkSize
+ return nil
+ }
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// This option will be ignored if WithCompression option is used.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+ return func(o *options) error {
+ o.compressionLevel = level
+ return nil
+ }
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/"
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+ return func(o *options) error {
+ o.prioritizedFiles = files
+ return nil
+ }
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of prioritized files specified by WithPrioritizedFiles option aren't found
+// in the input tar. Instead, this records all missed file names to the passed
+// slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+ return func(o *options) error {
+ if missedFiles == nil {
+ return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+ }
+ o.missedPrioritizedFiles = missedFiles
+ return nil
+ }
+}
+
+// WithCompression specifies compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+ return func(o *options) error {
+ o.compression = compression
+ return nil
+ }
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.ctx = ctx
+ return nil
+ }
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data
+// must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files
+// and it hopefully leads to smaller result blob.
+// NOTE: This adds a TOC property that old reader doesn't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+ return func(o *options) error {
+ o.minChunkSize = minChunkSize
+ return nil
+ }
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+ io.ReadCloser
+ diffID digest.Digester
+ tocDigest digest.Digest
+}
+
+// DiffID returns the digest of uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+ return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+ return b.tocDigest
+}
+
+// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If there are some prioritized files are listed in
+// the option, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several
+// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs.
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+ var opts options
+ opts.compressionLevel = gzip.BestCompression // BestCompression by default
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+ if opts.compression == nil {
+ opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+ }
+ layerFiles := newTempFiles()
+ ctx := opts.ctx
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ select {
+ case <-done:
+ // nop
+ case <-ctx.Done():
+ layerFiles.CleanupAll()
+ }
+ }()
+ defer func() {
+ if rErr != nil {
+ if err := layerFiles.CleanupAll(); err != nil {
+ rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
+ }
+ }
+ if cErr := ctx.Err(); cErr != nil {
+ rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+ }
+ }()
+ tarBlob, err := decompressBlob(tarBlob, layerFiles)
+ if err != nil {
+ return nil, err
+ }
+ entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+ if err != nil {
+ return nil, err
+ }
+ var tarParts [][]*entry
+ if opts.minChunkSize > 0 {
+ // Each entry needs to know the size of the current gzip stream so they
+ // cannot be processed in parallel.
+ tarParts = [][]*entry{entries}
+ } else {
+ tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
+ }
+ writers := make([]*Writer, len(tarParts))
+ payloads := make([]*os.File, len(tarParts))
+ var mu sync.Mutex
+ var eg errgroup.Group
+ for i, parts := range tarParts {
+ i, parts := i, parts
+ // builds verifiable stargz sub-blobs
+ eg.Go(func() error {
+ esgzFile, err := layerFiles.TempFile("", "esgzdata")
+ if err != nil {
+ return err
+ }
+ sw := NewWriterWithCompressor(esgzFile, opts.compression)
+ sw.ChunkSize = opts.chunkSize
+ sw.MinChunkSize = opts.minChunkSize
+ if sw.needsOpenGzEntries == nil {
+ sw.needsOpenGzEntries = make(map[string]struct{})
+ }
+ for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
+ sw.needsOpenGzEntries[f] = struct{}{}
+ }
+ if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+ return err
+ }
+ mu.Lock()
+ writers[i] = sw
+ payloads[i] = esgzFile
+ mu.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ rErr = err
+ return nil, err
+ }
+ tocAndFooter, tocDgst, err := closeWithCombine(writers...)
+ if err != nil {
+ rErr = err
+ return nil, err
+ }
+ var rs []io.Reader
+ for _, p := range payloads {
+ fs, err := fileSectionReader(p)
+ if err != nil {
+ return nil, err
+ }
+ rs = append(rs, fs)
+ }
+ diffID := digest.Canonical.Digester()
+ pr, pw := io.Pipe()
+ go func() {
+ r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer r.Close()
+ if _, err := io.Copy(diffID.Hash(), r); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ pw.Close()
+ }()
+ return &Blob{
+ ReadCloser: readCloser{
+ Reader: pr,
+ closeFunc: layerFiles.CleanupAll,
+ },
+ tocDigest: tocDgst,
+ diffID: diffID,
+ }, nil
+}
+
+// closeWithCombine takes unclosed Writers and close them. This also returns the
+// toc that combined all Writers into.
+// Writers doesn't write TOC and footer to the underlying writers so they can be
+// combined into a single eStargz and tocAndFooter returned by this function can
+// be appended at the tail of that combined blob.
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+ if len(ws) == 0 {
+ return nil, "", fmt.Errorf("at least one writer must be passed")
+ }
+ for _, w := range ws {
+ if w.closed {
+ return nil, "", fmt.Errorf("writer must be unclosed")
+ }
+ defer func(w *Writer) { w.closed = true }(w)
+ if err := w.closeGz(); err != nil {
+ return nil, "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return nil, "", err
+ }
+ }
+ var (
+ mtoc = new(JTOC)
+ currentOffset int64
+ )
+ mtoc.Version = ws[0].toc.Version
+ for _, w := range ws {
+ for _, e := range w.toc.Entries {
+ // Recalculate Offset of non-empty files/chunks
+ if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+ e.Offset += currentOffset
+ }
+ mtoc.Entries = append(mtoc.Entries, e)
+ }
+ if w.toc.Version > mtoc.Version {
+ mtoc.Version = w.toc.Version
+ }
+ currentOffset += w.cw.n
+ }
+
+ return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+ buf := new(bytes.Buffer)
+ tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ return buf, tocDigest, nil
+}
+
+// divideEntries divides passed entries to the parts at least the number specified by the
+// argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+ var estimatedSize int64
+ for _, e := range entries {
+ estimatedSize += e.header.Size
+ }
+ unitSize := estimatedSize / int64(minPartsNum)
+ var (
+ nextEnd = unitSize
+ offset int64
+ )
+ set = append(set, []*entry{})
+ for _, e := range entries {
+ set[len(set)-1] = append(set[len(set)-1], e)
+ offset += e.header.Size
+ if offset > nextEnd {
+ set = append(set, []*entry{})
+ nextEnd += unitSize
+ }
+ }
+ return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If some of prioritized files are specified, the list starts from these
+// files with keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+ // Import tar file.
+ intar, err := importTar(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to sort: %w", err)
+ }
+
+ // Sort the tar file respecting to the prioritized files list.
+ sorted := &tarFile{}
+ for _, l := range prioritized {
+ if err := moveRec(l, intar, sorted); err != nil {
+ if errors.Is(err, errNotFound) && missedPrioritized != nil {
+ *missedPrioritized = append(*missedPrioritized, l)
+ continue // allow not found
+ }
+ return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+ }
+ }
+ if len(prioritized) == 0 {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ } else {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ }
+
+	// Dump all entries and concatenate them.
+ return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of tar archive that contains entries passed
+// through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader {
+ pr, pw := io.Pipe()
+ go func() {
+ tw := tar.NewWriter(pw)
+ defer tw.Close()
+ for _, entry := range entries {
+ if err := tw.WriteHeader(entry.header); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+ return
+ }
+ if _, err := io.Copy(tw, entry.payload); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+ return
+ }
+ }
+ pw.Close()
+ }()
+ return pr
+}
+
+func importTar(in io.ReaderAt) (*tarFile, error) {
+ tf := &tarFile{}
+ pw, err := newCountReadSeeker(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to make position watcher: %w", err)
+ }
+ tr := tar.NewReader(pw)
+
+ // Walk through all nodes.
+ for {
+ // Fetch and parse next header.
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("failed to parse tar file, %w", err)
+ }
+ switch cleanEntryName(h.Name) {
+ case PrefetchLandmark, NoPrefetchLandmark:
+ // Ignore existing landmark
+ continue
+ }
+
+ // Add entry. If it already exists, replace it.
+ if _, ok := tf.get(h.Name); ok {
+ tf.remove(h.Name)
+ }
+ tf.add(&entry{
+ header: h,
+ payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
+ })
+ }
+
+ return tf, nil
+}
+
+func moveRec(name string, in *tarFile, out *tarFile) error {
+ name = cleanEntryName(name)
+ if name == "" { // root directory. stop recursion.
+ if e, ok := in.get(name); ok {
+ // entry of the root directory exists. we should move it as well.
+ // this case will occur if tar entries are prefixed with "./", "/", etc.
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+ }
+
+ _, okIn := in.get(name)
+ _, okOut := out.get(name)
+ if !okIn && !okOut {
+ return fmt.Errorf("file: %q: %w", name, errNotFound)
+ }
+
+ parent, _ := path.Split(strings.TrimSuffix(name, "/"))
+ if err := moveRec(parent, in, out); err != nil {
+ return err
+ }
+ if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
+ if err := moveRec(e.header.Linkname, in, out); err != nil {
+ return err
+ }
+ }
+ if e, ok := in.get(name); ok {
+ out.add(e)
+ in.remove(name)
+ }
+ return nil
+}
+
+type entry struct {
+ header *tar.Header
+ payload io.ReadSeeker
+}
+
+type tarFile struct {
+ index map[string]*entry
+ stream []*entry
+}
+
+func (f *tarFile) add(e *entry) {
+ if f.index == nil {
+ f.index = make(map[string]*entry)
+ }
+ f.index[cleanEntryName(e.header.Name)] = e
+ f.stream = append(f.stream, e)
+}
+
+func (f *tarFile) remove(name string) {
+ name = cleanEntryName(name)
+ if f.index != nil {
+ delete(f.index, name)
+ }
+ var filtered []*entry
+ for _, e := range f.stream {
+ if cleanEntryName(e.header.Name) == name {
+ continue
+ }
+ filtered = append(filtered, e)
+ }
+ f.stream = filtered
+}
+
+func (f *tarFile) get(name string) (e *entry, ok bool) {
+ if f.index == nil {
+ return nil, false
+ }
+ e, ok = f.index[cleanEntryName(name)]
+ return
+}
+
+func (f *tarFile) dump() []*entry {
+ return f.stream
+}
+
+type readCloser struct {
+ io.Reader
+ closeFunc func() error
+}
+
+func (rc readCloser) Close() error {
+ return rc.closeFunc()
+}
+
+func fileSectionReader(file *os.File) (*io.SectionReader, error) {
+ info, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(file, 0, info.Size()), nil
+}
+
+func newTempFiles() *tempFiles {
+ return &tempFiles{}
+}
+
+type tempFiles struct {
+ files []*os.File
+ filesMu sync.Mutex
+ cleanupOnce sync.Once
+}
+
+func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
+ f, err := os.CreateTemp(dir, pattern)
+ if err != nil {
+ return nil, err
+ }
+ tf.filesMu.Lock()
+ tf.files = append(tf.files, f)
+ tf.filesMu.Unlock()
+ return f, nil
+}
+
+func (tf *tempFiles) CleanupAll() (err error) {
+ tf.cleanupOnce.Do(func() {
+ err = tf.cleanupAll()
+ })
+ return
+}
+
+func (tf *tempFiles) cleanupAll() error {
+ tf.filesMu.Lock()
+ defer tf.filesMu.Unlock()
+ var allErr []error
+ for _, f := range tf.files {
+ if err := f.Close(); err != nil {
+ allErr = append(allErr, err)
+ }
+ if err := os.Remove(f.Name()); err != nil {
+ allErr = append(allErr, err)
+ }
+ }
+ tf.files = nil
+ return errorutil.Aggregate(allErr)
+}
+
+func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
+ pos := int64(0)
+ return &countReadSeeker{r: r, cPos: &pos}, nil
+}
+
+type countReadSeeker struct {
+ r io.ReaderAt
+ cPos *int64
+
+ mu sync.Mutex
+}
+
+func (cr *countReadSeeker) Read(p []byte) (int, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ n, err := cr.r.ReadAt(p, *cr.cPos)
+ if err == nil {
+ *cr.cPos += int64(n)
+ }
+ return n, err
+}
+
+func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ switch whence {
+ default:
+ return 0, fmt.Errorf("Unknown whence: %v", whence)
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += *cr.cPos
+ case io.SeekEnd:
+ return 0, fmt.Errorf("Unsupported whence: %v", whence)
+ }
+
+ if offset < 0 {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ *cr.cPos = offset
+ return offset, nil
+}
+
+func (cr *countReadSeeker) currentPos() int64 {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+
+ return *cr.cPos
+}
+
+func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
+ if org.Size() < 4 {
+ return org, nil
+ }
+ src := make([]byte, 4)
+ if _, err := org.Read(src); err != nil && err != io.EOF {
+ return nil, err
+ }
+ var dR io.Reader
+ if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
+ // gzip
+ dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dgR.Close()
+ dR = io.Reader(dgR)
+ } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
+ // zstd
+ dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer dzR.Close()
+ dR = io.Reader(dzR)
+ } else {
+ // uncompressed
+ return io.NewSectionReader(org, 0, org.Size()), nil
+ }
+ b, err := tmp.TempFile("", "uncompresseddata")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(b, dR); err != nil {
+ return nil, err
+ }
+ return fileSectionReader(b)
+}
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
new file mode 100644
index 000000000..6de78b02d
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errorutil
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Aggregate combines a list of errors into a single new error.
+func Aggregate(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ points := make([]string, len(errs)+1)
+ points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+ for i, err := range errs {
+ points[i+1] = fmt.Sprintf("* %s", err)
+ }
+ return errors.New(strings.Join(points, "\n\t"))
+ }
+}
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 000000000..f4d554655
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1223 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+ sr *io.SectionReader
+ toc *JTOC
+ tocDigest digest.Digest
+
+ // m stores all non-chunk entries, keyed by name.
+ m map[string]*TOCEntry
+
+ // chunks stores all TOCEntry values for regular files that
+ // are split up. For a file with a single chunk, it's only
+ // stored in m.
+ chunks map[string][]*TOCEntry
+
+ decompressor Decompressor
+}
+
+type openOpts struct {
+ tocOffset int64
+ decompressors []Decompressor
+ telemetry *Telemetry
+}
+
+// OpenOption is an option used during opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+ return func(o *openOpts) error {
+ o.tocOffset = tocOffset
+ return nil
+ }
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+ return func(o *openOpts) error {
+ o.decompressors = decompressors
+ return nil
+ }
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+ return func(o *openOpts) error {
+ o.telemetry = telemetry
+ return nil
+ }
+}
+
+// MeasureLatencyHook is a func which takes start time and records the diff
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...)
+type Telemetry struct {
+ GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+ GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+ DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
+
+// Open opens a stargz file for reading.
+// The behavior is configurable using options.
+//
+// Note that each entry name is normalized as the path that is relative to root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+ var opts openOpts
+ for _, o := range opt {
+ if err := o(&opts); err != nil {
+ return nil, err
+ }
+ }
+
+ gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+ decompressors := append(gzipCompressors, opts.decompressors...)
+
+ // Determine the size to fetch. Try to fetch as many bytes as possible.
+ fetchSize := maxFooterSize(sr.Size(), decompressors...)
+ if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+ if maybeTocOffset > sr.Size() {
+ return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+ }
+ fetchSize = sr.Size() - maybeTocOffset
+ }
+
+ start := time.Now() // before getting layer footer
+ footer := make([]byte, fetchSize)
+ if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+ return nil, fmt.Errorf("error reading footer: %v", err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+ opts.telemetry.GetFooterLatency(start)
+ }
+
+ var allErr []error
+ var found bool
+ var r *Reader
+ for _, d := range decompressors {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ maybeTocBytes := footer[:fOffset]
+ _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+ if err != nil {
+ allErr = append(allErr, err)
+ continue
+ }
+ if tocOffset >= 0 && tocSize <= 0 {
+ tocSize = sr.Size() - tocOffset - fSize
+ }
+ if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
+ maybeTocBytes = maybeTocBytes[:tocSize]
+ }
+ r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+ if err == nil {
+ found = true
+ break
+ }
+ allErr = append(allErr, err)
+ }
+ if !found {
+ return nil, errorutil.Aggregate(allErr)
+ }
+ if err := r.initFields(); err != nil {
+ return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
+ }
+ return r, nil
+}
+
+// OpenFooter extracts and parses footer from the given blob.
+// only supports gzip-based eStargz.
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
+ if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
+ return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
+ }
+ var footer [FooterSize]byte
+ if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
+ return 0, 0, fmt.Errorf("error reading footer: %v", err)
+ }
+ var allErr []error
+ for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
+ fSize := d.FooterSize()
+ fOffset := positive(int64(len(footer)) - fSize)
+ _, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+ if err == nil {
+ return tocOffset, fSize, err
+ }
+ allErr = append(allErr, err)
+ }
+ return 0, 0, errorutil.Aggregate(allErr)
+}
+
+// initFields populates the Reader from r.toc after decoding it from
+// JSON.
+//
+// Unexported fields are populated and TOCEntry fields that were
+// implicit in the JSON are populated.
+func (r *Reader) initFields() error {
+ r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+ r.chunks = make(map[string][]*TOCEntry)
+ var lastPath string
+ uname := map[int]string{}
+ gname := map[int]string{}
+ var lastRegEnt *TOCEntry
+ var chunkTopIndex int
+ for i, ent := range r.toc.Entries {
+ ent.Name = cleanEntryName(ent.Name)
+ switch ent.Type {
+ case "reg", "chunk":
+ if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
+ chunkTopIndex = i
+ }
+ ent.chunkTopIndex = chunkTopIndex
+ }
+ if ent.Type == "reg" {
+ lastRegEnt = ent
+ }
+ if ent.Type == "chunk" {
+ ent.Name = lastPath
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ if ent.ChunkSize == 0 && lastRegEnt != nil {
+ ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+ }
+ } else {
+ lastPath = ent.Name
+
+ if ent.Uname != "" {
+ uname[ent.UID] = ent.Uname
+ } else {
+ ent.Uname = uname[ent.UID]
+ }
+ if ent.Gname != "" {
+ gname[ent.GID] = ent.Gname
+ } else {
+ ent.Gname = uname[ent.GID]
+ }
+
+ ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+ if ent.Type == "dir" {
+ ent.NumLink++ // Parent dir links to this directory
+ }
+ r.m[ent.Name] = ent
+ }
+ if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+ r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ }
+ if ent.ChunkSize == 0 && ent.Size != 0 {
+ ent.ChunkSize = ent.Size
+ }
+ }
+
+ // Populate children, add implicit directories:
+ for _, ent := range r.toc.Entries {
+ if ent.Type == "chunk" {
+ continue
+ }
+ // add "foo/":
+ // add "foo" child to "" (creating "" if necessary)
+ //
+ // add "foo/bar/":
+ // add "bar" child to "foo" (creating "foo" if necessary)
+ //
+ // add "foo/bar.txt":
+ // add "bar.txt" child to "foo" (creating "foo" if necessary)
+ //
+ // add "a/b/c/d/e/f.txt":
+ // create "a/b/c/d/e" node
+ // add "f.txt" child to "e"
+
+ name := ent.Name
+ pdirName := parentDir(name)
+ if name == pdirName {
+ // This entry and its parent are the same.
+ // Ignore this for avoiding infinite loop of the reference.
+ // The example case where this can occur is when tar contains the root
+ // directory itself (e.g. "./", "/").
+ continue
+ }
+ pdir := r.getOrCreateDir(pdirName)
+ ent.NumLink++ // at least one name(ent.Name) references this entry.
+ if ent.Type == "hardlink" {
+ org, err := r.getSource(ent)
+ if err != nil {
+ return err
+ }
+ org.NumLink++ // original entry is referenced by this ent.Name.
+ ent = org
+ }
+ pdir.addChild(path.Base(name), ent)
+ }
+
+ lastOffset := r.sr.Size()
+ for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+ e := r.toc.Entries[i]
+ if e.isDataType() {
+ e.nextOffset = lastOffset
+ }
+ if e.Offset != 0 && e.InnerOffset == 0 {
+ lastOffset = e.Offset
+ }
+ }
+
+ return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+ if ent.Type == "hardlink" {
+ org, ok := r.m[cleanEntryName(ent.LinkName)]
+ if !ok {
+ return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ ent, err = r.getSource(org)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ent, nil
+}
+
+func parentDir(p string) string {
+ dir, _ := path.Split(p)
+ return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+ e, ok := r.m[d]
+ if !ok {
+ e = &TOCEntry{
+ Name: d,
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 2, // The directory itself(.) and the parent link to this directory.
+ }
+ r.m[d] = e
+ if d != "" {
+ pdir := r.getOrCreateDir(parentDir(d))
+ pdir.addChild(path.Base(d), e)
+ }
+ }
+ return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+ return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+ // Verify the digest of TOC JSON
+ if r.tocDigest != tocDigest {
+ return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+ }
+ return r.Verifiers()
+}
+
+// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases
+// because this doesn't verify TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+ chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+ regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
+ var chunkDigestMapIncomplete bool
+ var regDigestMapIncomplete bool
+ var containsChunk bool
+ for _, e := range r.toc.Entries {
+ if e.Type != "reg" && e.Type != "chunk" {
+ continue
+ }
+
+ // offset must be unique in stargz blob
+ _, dOK := chunkDigestMap[e.Offset]
+ _, rOK := regDigestMap[e.Offset]
+ if dOK || rOK {
+ return nil, fmt.Errorf("offset %d found twice", e.Offset)
+ }
+
+ if e.Type == "reg" {
+ if e.Size == 0 {
+ continue // ignores empty file
+ }
+
+ // record the digest of regular file payload
+ if e.Digest != "" {
+ d, err := digest.Parse(e.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+ }
+ regDigestMap[e.Offset] = d
+ } else {
+ regDigestMapIncomplete = true
+ }
+ } else {
+ containsChunk = true // this layer contains "chunk" entries.
+ }
+
+ // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+ // chunked file)
+ if e.ChunkDigest != "" {
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+ }
+ chunkDigestMap[e.Offset] = d
+ } else {
+ chunkDigestMapIncomplete = true
+ }
+ }
+
+ if chunkDigestMapIncomplete {
+ // Though some chunk digests are not found, if this layer doesn't contain
+ // "chunk"s and all digest of "reg" files are recorded, we can use them instead.
+ if !containsChunk && !regDigestMapIncomplete {
+ return &verifier{digestMap: regDigestMap}, nil
+ }
+ return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+ }
+
+ return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+ digestMap map[int64]digest.Digest
+ digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+ v.digestMapMu.Lock()
+ defer v.digestMapMu.Unlock()
+ d, ok := v.digestMap[ce.Offset]
+ if !ok {
+ return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+ ce.Offset, ce.ChunkSize)
+ }
+ return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be absolute path or one that is relative to root.
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
+ name = cleanEntryName(name)
+ e, ok = r.Lookup(name)
+ if !ok || !e.isDataType() {
+ return nil, false
+ }
+ ents := r.chunks[name]
+ if len(ents) < 2 {
+ if offset >= e.ChunkSize {
+ return nil, false
+ }
+ return e, true
+ }
+ i := sort.Search(len(ents), func(i int) bool {
+ e := ents[i]
+ return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
+ })
+ if i == len(ents) {
+ return nil, false
+ }
+ return ents[i], true
+}
+
+// Lookup returns the Table of Contents entry for the given path.
+//
+// To get the root directory, use the empty string.
+// Path must be absolute path or one that is relative to root.
+func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
+ path = cleanEntryName(path)
+ if r == nil {
+ return
+ }
+ e, ok = r.m[path]
+ if ok && e.Type == "hardlink" {
+ var err error
+ e, err = r.getSource(e)
+ if err != nil {
+ return nil, false
+ }
+ }
+ return
+}
+
+// OpenFile returns the reader of the specified file payload.
+//
+// Name must be absolute path or one that is relative to root.
+func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) newFileReader(name string) (*fileReader, error) {
+ name = cleanEntryName(name)
+ ent, ok := r.Lookup(name)
+ if !ok {
+ // TODO: come up with some error plan. This is lazy:
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: os.ErrNotExist,
+ }
+ }
+ if ent.Type != "reg" {
+ return nil, &os.PathError{
+ Path: name,
+ Op: "OpenFile",
+ Err: errors.New("not a regular file"),
+ }
+ }
+ return &fileReader{
+ r: r,
+ size: ent.Size,
+ ents: r.getChunks(ent),
+ }, nil
+}
+
+func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
+ fr, err := r.newFileReader(name)
+ if err != nil {
+ return nil, err
+ }
+ fr.preRead = preRead
+ return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
+ if ents, ok := r.chunks[ent.Name]; ok {
+ return ents
+ }
+ return []*TOCEntry{ent}
+}
+
+type fileReader struct {
+ r *Reader
+ size int64
+ ents []*TOCEntry // 1 or more reg/chunk entries
+ preRead func(*TOCEntry, io.Reader) error
+}
+
+func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
+ if off >= fr.size {
+ return 0, io.EOF
+ }
+ if off < 0 {
+ return 0, errors.New("invalid offset")
+ }
+ var i int
+ if len(fr.ents) > 1 {
+ i = sort.Search(len(fr.ents), func(i int) bool {
+ return fr.ents[i].ChunkOffset >= off
+ })
+ if i == len(fr.ents) {
+ i = len(fr.ents) - 1
+ }
+ }
+ ent := fr.ents[i]
+ if ent.ChunkOffset > off {
+ if i == 0 {
+ return 0, errors.New("internal error; first chunk offset is non-zero")
+ }
+ ent = fr.ents[i-1]
+ }
+
+ // If ent is a chunk of a large file, adjust the ReadAt
+ // offset by the chunk's offset.
+ off -= ent.ChunkOffset
+
+ finalEnt := fr.ents[len(fr.ents)-1]
+ compressedOff := ent.Offset
+ // compressedBytesRemain is the number of compressed bytes in this
+ // file remaining, over 1+ chunks.
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff
+
+ sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain)
+
+ const maxRead = 2 << 20
+ var bufSize = maxRead
+ if compressedBytesRemain < maxRead {
+ bufSize = int(compressedBytesRemain)
+ }
+
+ br := bufio.NewReaderSize(sr, bufSize)
+ if _, err := br.Peek(bufSize); err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
+ }
+
+ dr, err := fr.r.decompressor.Reader(br)
+ if err != nil {
+ return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
+ }
+ defer dr.Close()
+
+ if fr.preRead == nil {
+ if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
+ return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
+ }
+ return io.ReadFull(dr, p)
+ }
+
+ var retN int
+ var retErr error
+ var found bool
+ var nr int64
+ for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
+ if !e.isDataType() {
+ continue
+ }
+ if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
+ break
+ }
+ if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
+ return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
+ }
+ nr = e.InnerOffset
+ if e == ent {
+ found = true
+ if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
+ return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
+ }
+ retN, retErr = io.ReadFull(dr, p)
+ nr += off + int64(retN)
+ continue
+ }
+ cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
+ if err := fr.preRead(e, cr); err != nil {
+ return 0, fmt.Errorf("failed to pre read: %w", err)
+ }
+ nr += cr.n
+ }
+ if !found {
+ return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
+ }
+ return retN, retErr
+}
+
+// A Writer writes stargz files.
+//
+// Use NewWriter to create a new Writer.
+type Writer struct {
+ bw *bufio.Writer
+ cw *countWriter
+ toc *JTOC
+ diffHash hash.Hash // SHA-256 of uncompressed tar
+
+ closed bool
+ gz io.WriteCloser
+ lastUsername map[int]string
+ lastGroupname map[int]string
+ compressor Compressor
+
+ uncompressedCounter *countWriteFlusher
+
+ // ChunkSize optionally controls the maximum number of bytes
+ // of data of a regular file that can be written in one gzip
+ // stream before a new gzip stream is started.
+ // Zero means to use a default, currently 4 MiB.
+ ChunkSize int
+
+	// MinChunkSize optionally controls the minimum number of bytes
+	// of data that must be written in one gzip stream before a new gzip
+	// stream is started.
+	// NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+ MinChunkSize int
+
+ needsOpenGzEntries map[string]struct{}
+}
+
+// currentCompressionWriter writes to the current w.gz field, which can
+// change throughout writing a tar entry.
+//
+// Additionally, it updates w's SHA-256 of the uncompressed bytes
+// of the tar file.
+type currentCompressionWriter struct{ w *Writer }
+
+func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
+ ccw.w.diffHash.Write(p)
+ if ccw.w.gz == nil {
+ if err := ccw.w.condOpenGz(); err != nil {
+ return 0, err
+ }
+ }
+ return ccw.w.gz.Write(p)
+}
+
+func (w *Writer) chunkSize() int {
+ if w.ChunkSize <= 0 {
+ return 4 << 20
+ }
+ return w.ChunkSize
+}
+
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+ footerSize := c.FooterSize()
+ if sr.Size() < footerSize {
+ return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+ }
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ return nil, err
+ }
+ blobPayloadSize, _, _, err := c.ParseFooter(footer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse footer: %w", err)
+ }
+ if blobPayloadSize < 0 {
+ blobPayloadSize = sr.Size()
+ }
+ return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
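+
+// A minimal usage sketch for Unpack, assuming the eStargz blob is available
+// in memory as "blob" (a hypothetical []byte; error handling elided):
+//
+//	sr := io.NewSectionReader(bytes.NewReader(blob), 0, int64(len(blob)))
+//	rc, err := Unpack(sr, &GzipDecompressor{})
+//	if err != nil { /* handle error */ }
+//	defer rc.Close()
+//	tr := tar.NewReader(rc) // iterate the plain tar entries via tr.Next()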
+
+// NewWriter returns a new stargz writer (gzip-based) writing to w.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterLevel(w, gzip.BestCompression)
+}
+
+// NewWriterLevel returns a new stargz writer (gzip-based) writing to w.
+// The compression level is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+ return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
+}
+
+// NewWriterWithCompressor returns a new stargz writer writing to w.
+// The compression method is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+ bw := bufio.NewWriter(w)
+ cw := &countWriter{w: bw}
+ return &Writer{
+ bw: bw,
+ cw: cw,
+ toc: &JTOC{Version: 1},
+ diffHash: sha256.New(),
+ compressor: c,
+ uncompressedCounter: &countWriteFlusher{},
+ }
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+ if w.closed {
+ return "", nil
+ }
+ defer func() { w.closed = true }()
+
+ if err := w.closeGz(); err != nil {
+ return "", err
+ }
+
+ // Write the TOC index and footer.
+ tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+ if err != nil {
+ return "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return "", err
+ }
+
+ return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+ if w.closed {
+ return errors.New("write on closed Writer")
+ }
+ if w.gz != nil {
+ if err := w.gz.Close(); err != nil {
+ return err
+ }
+ w.gz = nil
+ }
+ return nil
+}
+
+func (w *Writer) flushGz() error {
+ if w.closed {
+ return errors.New("flush on closed Writer")
+ }
+ if w.gz != nil {
+ if f, ok := w.gz.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ }
+ return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+ if name == "" {
+ return ""
+ }
+ if *mp == nil {
+ *mp = make(map[int]string)
+ }
+ if (*mp)[id] == name {
+ return ""
+ }
+ (*mp)[id] = name
+ return name
+}
+
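+// condOpenGz lazily opens a new compression stream on the underlying counting
+// writer when none is currently open, and wraps it so that the number of
+// uncompressed bytes written to it is tracked.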
+func (w *Writer) condOpenGz() (err error) {
+ if w.gz == nil {
+ w.gz, err = w.compressor.Writer(w.cw)
+ if w.gz != nil {
+ w.gz = w.uncompressedCounter.register(w.gz)
+ }
+ }
+ return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+ return w.appendTar(r, false)
+}
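+
+// A minimal write-side sketch, assuming "tarFile" is an io.Reader over a tar
+// or tar.gz stream (hypothetical; error handling elided). AppendTarLossLess
+// can be used instead when the input bytes must be preserved verbatim:
+//
+//	var buf bytes.Buffer
+//	w := NewWriter(&buf)
+//	w.ChunkSize = 4 << 20 // optional; zero means the default
+//	if err := w.AppendTar(tarFile); err != nil { /* handle error */ }
+//	tocDigest, err := w.Close() // writes the TOC and footer
+//	diffID := w.DiffID()        // SHA-256 of the uncompressed tar; valid after Close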
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference between this func and AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot overwrite the TOC JSON with the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+ return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+ var src io.Reader
+ br := bufio.NewReader(r)
+ if isGzip(br) {
+ zr, _ := gzip.NewReader(br)
+ src = zr
+ } else {
+ src = io.Reader(br)
+ }
+ dst := currentCompressionWriter{w}
+ var tw *tar.Writer
+ if !lossless {
+ tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+ }
+ tr := tar.NewReader(src)
+ if lossless {
+ tr.RawAccounting = true
+ }
+ prevOffset := w.cw.n
+ var prevOffsetUncompressed int64
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ if lossless {
+ if remain := tr.RawBytes(); len(remain) > 0 {
+ // Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+ if _, err := dst.Write(remain); err != nil {
+ return err
+ }
+ }
+ }
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+ }
+ if cleanEntryName(h.Name) == TOCTarName {
+ // It is possible for a layer to be "stargzified" twice during the
+ // distribution lifecycle. So we reserve "TOCTarName" here to avoid
+ // duplicated entries in the resulting layer.
+ if lossless {
+ // We cannot handle this in lossless way.
+ return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+ }
+ continue
+ }
+
+ xattrs := make(map[string][]byte)
+ const xattrPAXRecordsPrefix = "SCHILY.xattr."
+ if h.PAXRecords != nil {
+ for k, v := range h.PAXRecords {
+ if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
+ xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
+ }
+ }
+ }
+ ent := &TOCEntry{
+ Name: h.Name,
+ Mode: h.Mode,
+ UID: h.Uid,
+ GID: h.Gid,
+ Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
+ Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
+ ModTime3339: formatModtime(h.ModTime),
+ Xattrs: xattrs,
+ }
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+ if tw != nil {
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+ } else {
+ if _, err := dst.Write(tr.RawBytes()); err != nil {
+ return err
+ }
+ }
+ switch h.Typeflag {
+ case tar.TypeLink:
+ ent.Type = "hardlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeSymlink:
+ ent.Type = "symlink"
+ ent.LinkName = h.Linkname
+ case tar.TypeDir:
+ ent.Type = "dir"
+ case tar.TypeReg:
+ ent.Type = "reg"
+ ent.Size = h.Size
+ case tar.TypeChar:
+ ent.Type = "char"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeBlock:
+ ent.Type = "block"
+ ent.DevMajor = int(h.Devmajor)
+ ent.DevMinor = int(h.Devminor)
+ case tar.TypeFifo:
+ ent.Type = "fifo"
+ default:
+ return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
+ }
+
+ // We need to keep a reference to the TOC entry for regular files, so that we
+ // can fill the digest later.
+ var regFileEntry *TOCEntry
+ var payloadDigest digest.Digester
+ if h.Typeflag == tar.TypeReg {
+ regFileEntry = ent
+ payloadDigest = digest.Canonical.Digester()
+ }
+
+ if h.Typeflag == tar.TypeReg && ent.Size > 0 {
+ var written int64
+ totalSize := ent.Size // save it before we destroy ent
+ tee := io.TeeReader(tr, payloadDigest.Hash())
+ for written < totalSize {
+ chunkSize := int64(w.chunkSize())
+ remain := totalSize - written
+ if remain < chunkSize {
+ chunkSize = remain
+ } else {
+ ent.ChunkSize = chunkSize
+ }
+
+ // We flush the underlying compression writer here to correctly calculate "w.cw.n".
+ if err := w.flushGz(); err != nil {
+ return err
+ }
+ if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+ if err := w.closeGz(); err != nil {
+ return err
+ }
+ ent.Offset = w.cw.n
+ prevOffset = ent.Offset
+ prevOffsetUncompressed = w.uncompressedCounter.n
+ } else {
+ ent.Offset = prevOffset
+ ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+ }
+
+ ent.ChunkOffset = written
+ chunkDigest := digest.Canonical.Digester()
+
+ if err := w.condOpenGz(); err != nil {
+ return err
+ }
+
+ teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+ var out io.Writer
+ if tw != nil {
+ out = tw
+ } else {
+ out = dst
+ }
+ if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+ return fmt.Errorf("error copying %q: %v", h.Name, err)
+ }
+ ent.ChunkDigest = chunkDigest.Digest().String()
+ w.toc.Entries = append(w.toc.Entries, ent)
+ written += chunkSize
+ ent = &TOCEntry{
+ Name: h.Name,
+ Type: "chunk",
+ }
+ }
+ } else {
+ w.toc.Entries = append(w.toc.Entries, ent)
+ }
+ if payloadDigest != nil {
+ regFileEntry.Digest = payloadDigest.Digest().String()
+ }
+ if tw != nil {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ }
+ }
+ remainDest := io.Discard
+ if lossless {
+ remainDest = dst // Preserve the remaining bytes in lossless mode
+ }
+ _, err := io.Copy(remainDest, src)
+ return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+ if ent.Type != "reg" {
+ return false
+ }
+ if w.needsOpenGzEntries == nil {
+ return false
+ }
+ _, ok := w.needsOpenGzEntries[ent.Name]
+ return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+ return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+ for _, d := range decompressors {
+ if s := d.FooterSize(); res < s && s <= blobSize {
+ res = s
+ }
+ }
+ return
+}
+
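+// parseTOC locates and parses the TOC using the given decompressor. A negative
+// tocOff means the TOC isn't contained in the blob, so ParseTOC is invoked
+// with a nil reader and is expected to fetch the TOC from an external
+// location. Otherwise, tocBytes is parsed directly if already provided, or
+// tocSize bytes are read from sr at tocOff. Telemetry callbacks, when
+// configured, record the fetch and deserialization latencies.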
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+ if tocOff < 0 {
+ // This means that TOC isn't contained in the blob.
+		// We pass a nil reader to ParseTOC and expect that ParseTOC acquires the TOC from
+		// the external location.
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(nil)
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ if len(tocBytes) > 0 {
+ start := time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err == nil {
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+ }
+ }
+
+ start := time.Now()
+ tocBytes = make([]byte, tocSize)
+ if _, err := sr.ReadAt(tocBytes, tocOff); err != nil {
+ return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err)
+ }
+ if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+ opts.telemetry.GetTocLatency(start)
+ }
+ start = time.Now()
+ toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+ if err != nil {
+ return nil, err
+ }
+ if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+ opts.telemetry.DeserializeTocLatency(start)
+ }
+ return &Reader{
+ sr: sr,
+ toc: toc,
+ tocDigest: tocDgst,
+ decompressor: d,
+ }, nil
+}
+
+func formatModtime(t time.Time) string {
+ if t.IsZero() || t.Unix() == 0 {
+ return ""
+ }
+ return t.UTC().Round(time.Second).Format(time.RFC3339)
+}
+
+func cleanEntryName(name string) string {
+ // Use path.Clean to consistently deal with path separators across platforms.
+ return strings.TrimPrefix(path.Clean("/"+name), "/")
+}
+
+// countWriter counts how many bytes have been written to its wrapped
+// io.Writer.
+type countWriter struct {
+ w io.Writer
+ n int64
+}
+
+func (cw *countWriter) Write(p []byte) (n int, err error) {
+ n, err = cw.w.Write(p)
+ cw.n += int64(n)
+ return
+}
+
+type countWriteFlusher struct {
+ io.WriteCloser
+ n int64
+}
+
+func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
+ wc.WriteCloser = w
+ return wc
+}
+
+func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
+ n, err = wc.WriteCloser.Write(p)
+ wc.n += int64(n)
+ return
+}
+
+func (wc *countWriteFlusher) Flush() error {
+ if f, ok := wc.WriteCloser.(interface {
+ Flush() error
+ }); ok {
+ return f.Flush()
+ }
+ return nil
+}
+
+func (wc *countWriteFlusher) Close() error {
+ err := wc.WriteCloser.Close()
+ wc.WriteCloser = nil
+ return err
+}
+
+// isGzip reports whether br is positioned right before an upcoming gzip stream.
+// It does not consume any bytes from br.
+func isGzip(br *bufio.Reader) bool {
+ const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ )
+ peek, _ := br.Peek(3)
+ return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
+}
+
+func positive(n int64) int64 {
+ if n < 0 {
+ return 0
+ }
+ return n
+}
+
+type countReader struct {
+ r io.Reader
+ n int64
+}
+
+func (cr *countReader) Read(p []byte) (n int, err error) {
+ n, err = cr.r.Read(p)
+ cr.n += int64(n)
+ return
+}
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
new file mode 100644
index 000000000..f24afe32f
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -0,0 +1,237 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+type gzipCompression struct {
+ *GzipCompressor
+ *GzipDecompressor
+}
+
+func newGzipCompressionWithLevel(level int) Compression {
+ return &gzipCompression{
+ &GzipCompressor{level},
+ &GzipDecompressor{},
+ }
+}
+
+func NewGzipCompressor() *GzipCompressor {
+ return &GzipCompressor{gzip.BestCompression}
+}
+
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+ return &GzipCompressor{level}
+}
+
+type GzipCompressor struct {
+ compressionLevel int
+}
+
+func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
+ return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
+ gw := io.Writer(gz)
+ if diffHash != nil {
+ gw = io.MultiWriter(gz, diffHash)
+ }
+ tw := tar.NewWriter(gw)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ return "", err
+ }
+
+ if err := tw.Close(); err != nil {
+ return "", err
+ }
+ if err := gz.Close(); err != nil {
+ return "", err
+ }
+ if _, err := w.Write(gzipFooterBytes(off)); err != nil {
+ return "", err
+ }
+ return digest.FromBytes(tocJSON), nil
+}
+
+// gzipFooterBytes returns the 51-byte footer.
+func gzipFooterBytes(tocOff int64) []byte {
+ buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+ gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+ // Extra header indicating the offset of TOCJSON
+ // https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+ header := make([]byte, 4)
+ header[0], header[1] = 'S', 'G'
+ subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+ binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+ gz.Header.Extra = append(header, []byte(subfield)...)
+ gz.Close()
+ if buf.Len() != FooterSize {
+ panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+ }
+ return buf.Bytes()
+}
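+
+// The footer produced above is itself a tiny gzip stream: its extra header
+// carries the subfield ID bytes 'S' and 'G', a little-endian subfield length,
+// and the payload "%016x" (the TOC offset in hex) followed by the literal
+// "STARGZ". GzipDecompressor.ParseFooter below reverses exactly this encoding.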
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != FooterSize {
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+ if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+ }
+ if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ }
+ if string(subfield[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ }
+ tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != legacyFooterSize {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ if len(extra) != 16+len("STARGZ") {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+ }
+ if string(extra[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+ }
+ tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+ return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ tr, err := decompressTOCEStargz(r)
+ if err != nil {
+ return nil, "", err
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
+ if err := tr.Close(); err != nil {
+ return nil, "", err
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ zr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+ }
+ zr.Multistream(false)
+ tr := tar.NewReader(zr)
+ h, err := tr.Next()
+ if err != nil {
+ return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ }
+ if h.Name != TOCTarName {
+ return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+ }
+ return readCloser{tr, zr.Close}, nil
+}
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 000000000..0ca6fd75f
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2366 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+ Compression
+ TestStreams(t *testing.T, b []byte, streams []int64)
+ DiffIDOf(*testing.T, []byte) string
+ String() string
+}
+
+// CompressionTestSuite tests that this pkg, together with the given controllers,
+// can build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
+ t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+ t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+ t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+}
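+
+// A typical wiring sketch: a compression implementation registers itself with
+// this suite from its own test file (the controller constructor below is
+// hypothetical):
+//
+//	func TestMyCompression(t *testing.T) {
+//		CompressionTestSuite(t, func() TestingController { return newMyTestingController() })
+//	}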
+
+type TestingControllerFactory func() TestingController
+
+const (
+ uncompressedType int = iota
+ gzipType
+ zstdType
+)
+
+var srcCompressions = []int{
+ uncompressedType,
+ gzipType,
+ zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as the normal stargz blob.
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize []int
+ in []tarEntry
+ }{
+ {
+ name: "regfiles and directories",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "test1"),
+ dir("foo2/"),
+ file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})),
+ ),
+ },
+ {
+ name: "empty files",
+ chunkSize: 4,
+ in: tarOf(
+ file("foo", "tttttt"),
+ file("foo_empty", ""),
+ file("foo2", "tttttt"),
+ file("foo_empty2", ""),
+ file("foo3", "tttttt"),
+ file("foo_empty3", ""),
+ file("foo4", "tttttt"),
+ file("foo_empty4", ""),
+ file("foo5", "tttttt"),
+ file("foo_empty5", ""),
+ file("foo6", "tttttt"),
+ ),
+ },
+ {
+ name: "various files",
+ chunkSize: 4,
+ minChunkSize: []int{0, 64000},
+ in: tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo1.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ {
+ name: "no contents",
+ chunkSize: 4,
+ in: tarOf(
+ file("baz.txt", ""),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ dir("dev/"),
+ blockdev("dev/testblock", 3, 4),
+ fifo("dev/testfifo"),
+ chardev("dev/testchar1", 5, 6),
+ file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ chardev("dev/testchar2", 1, 2),
+ ),
+ },
+ }
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+ tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+ // Test divideEntries()
+ entries, err := sortEntries(tarBlob, nil, nil) // identical order
+ if err != nil {
+ t.Fatalf("failed to parse tar: %v", err)
+ }
+ var merged []*entry
+ for _, part := range divideEntries(entries, 4) {
+ merged = append(merged, part...)
+ }
+ if !reflect.DeepEqual(entries, merged) {
+ for _, e := range entries {
+ t.Logf("Original: %v", e.header)
+ }
+ for _, e := range merged {
+ t.Logf("Merged: %v", e.header)
+ }
+ t.Errorf("divided entries couldn't be merged")
+ return
+ }
+
+ // Prepare sample data
+ cl1 := newCL()
+ wantBuf := new(bytes.Buffer)
+ sw := NewWriterWithCompressor(wantBuf, cl1)
+ sw.MinChunkSize = minChunkSize
+ sw.ChunkSize = tt.chunkSize
+ if err := sw.AppendTar(tarBlob); err != nil {
+ t.Fatalf("failed to append tar to want stargz: %v", err)
+ }
+ if _, err := sw.Close(); err != nil {
+ t.Fatalf("failed to prepare want stargz: %v", err)
+ }
+ wantData := wantBuf.Bytes()
+ want, err := Open(io.NewSectionReader(
+ bytes.NewReader(wantData), 0, int64(len(wantData))),
+ WithDecompressors(cl1),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the want stargz: %v", err)
+ }
+
+ // Prepare testing data
+ var opts []Option
+ if minChunkSize > 0 {
+ opts = append(opts, WithMinChunkSize(minChunkSize))
+ }
+ cl2 := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
+ if err != nil {
+ t.Fatalf("failed to build stargz: %v", err)
+ }
+ defer rc.Close()
+ gotBuf := new(bytes.Buffer)
+ if _, err := io.Copy(gotBuf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ gotData := gotBuf.Bytes()
+ got, err := Open(io.NewSectionReader(
+ bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+ WithDecompressors(cl2),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the got stargz: %v", err)
+ }
+
+ // Check DiffID is properly calculated
+ rc.Close()
+ diffID := rc.DiffID()
+ wantDiffID := cl2.DiffIDOf(t, gotData)
+ if diffID.String() != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ // Compare as stargz
+ if !isSameVersion(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz hasn't same json")
+ return
+ }
+ if !isSameEntries(t, want, got) {
+ t.Errorf("built stargz isn't same as the original")
+ return
+ }
+
+ // Compare as tar.gz
+ if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
+ t.Errorf("built stargz isn't same tar.gz")
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aGz, err := cla.Reader(bytes.NewReader(a))
+ if err != nil {
+ t.Fatalf("failed to read A")
+ }
+ defer aGz.Close()
+ bGz, err := clb.Reader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("failed to read B")
+ }
+ defer bGz.Close()
+
+ // Same as tar's Next() method but ignores landmarks and TOCJSON file
+ next := func(r *tar.Reader) (h *tar.Header, err error) {
+ for {
+ if h, err = r.Next(); err != nil {
+ return
+ }
+ if h.Name != PrefetchLandmark &&
+ h.Name != NoPrefetchLandmark &&
+ h.Name != TOCTarName {
+ return
+ }
+ }
+ }
+
+ aTar := tar.NewReader(aGz)
+ bTar := tar.NewReader(bGz)
+ for {
+ // Fetch and parse next header.
+ aH, aErr := next(aTar)
+ bH, bErr := next(bTar)
+ if aErr != nil || bErr != nil {
+ if aErr == io.EOF && bErr == io.EOF {
+ break
+ }
+ t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+ }
+ if !reflect.DeepEqual(aH, bH) {
+ t.Logf("different header (A = %v; B = %v)", aH, bH)
+ return false
+
+ }
+ aFile, err := io.ReadAll(aTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of A")
+ }
+ bFile, err := io.ReadAll(bTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of B")
+ }
+ if !bytes.Equal(aFile, bFile) {
+			t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile))
+ return false
+ }
+ }
+
+ return true
+}
+
+func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+ aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+ if err != nil {
+ t.Fatalf("failed to parse A: %v", err)
+ }
+ bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+ if err != nil {
+ t.Fatalf("failed to parse B: %v", err)
+ }
+ t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+ t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+ return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+ aroot, ok := a.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of A")
+ }
+ broot, ok := b.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of B")
+ }
+ aEntry := stargzEntry{aroot, a}
+ bEntry := stargzEntry{broot, b}
+ return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+ buf := new(bytes.Buffer)
+ var w io.WriteCloser
+ var err error
+ if srcCompression == gzipType {
+ w = gzip.NewWriter(buf)
+ } else if srcCompression == zstdType {
+ w, err = zstd.NewWriter(buf)
+ if err != nil {
+ t.Fatalf("failed to init zstd writer: %v", err)
+ }
+ } else {
+ return src
+ }
+ src.Seek(0, io.SeekStart)
+ if _, err := io.Copy(w, src); err != nil {
+ t.Fatalf("failed to compress source")
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("failed to finalize compress source")
+ }
+ data := buf.Bytes()
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+
+}
+
+type stargzEntry struct {
+ e *TOCEntry
+ r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+ ae, ar := a.e, a.r
+ be, br := b.e, b.r
+ t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+ if !equalEntry(ae, be) {
+ t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+ return false
+ }
+ if ae.Type == "dir" {
+ t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+ allChildrenName(ae), allChildrenName(be))
+ iscontain := true
+ ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+ // Walk through all files on this stargz file.
+
+ if aChild.Name == PrefetchLandmark ||
+ aChild.Name == NoPrefetchLandmark {
+ return true // Ignore landmarks
+ }
+
+			// Ignore a TOCEntry of "./" (formatted as "" by stargz lib) on root directory
+ // because this points to the root directory itself.
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+	// Here, we selectively compare fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset >= 0) == (b.Offset >= 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+		// chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ // ChunkSize int64 `json:"chunkSize,omitempty"`
+ // children map[string]*TOCEntry
+ a.Digest == b.Digest
+}
+
+func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
+ ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
+ if !ok {
+ return nil, 0, false
+ }
+ data := make([]byte, ce.ChunkSize)
+ t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset())
+ n, err := r.ReadAt(data, ce.ChunkOffset)
+ if err != nil {
+ t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v",
+ e.e.Name, ce.ChunkOffset, ce.ChunkSize, err)
+ }
+ if int64(n) != ce.ChunkSize {
+ t.Fatalf("unexpected copied data size %d; want %d",
+ n, ce.ChunkSize)
+ }
+ return data[:n], offset + ce.ChunkSize, true
+}
+
+func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
+ jtocData, err := json.Marshal(*tocJSON)
+ if err != nil {
+ t.Fatalf("failed to marshal TOC JSON: %v", err)
+ }
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil {
+ t.Fatalf("failed to read toc json blob: %v", err)
+ }
+ return buf.String()
+}
+
+const chunkSize = 3
+
+// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
+type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
+
+// testDigestAndVerify runs specified checks against sample stargz blobs.
+func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
+ tests := []struct {
+ name string
+ tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
+ checks []check
+ minChunkSize []int
+ }{
+ {
+ name: "no-regfile",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ dir("test/"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ dir("test2/"), // modified
+ ), allowedPrefix[0])),
+ },
+ },
+ {
+ name: "small-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "bbb", dgstMap),
+ )
+ },
+ minChunkSize: []int{0, 64000},
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", ""),
+ file("foo.txt", "M"), // modified
+ dir("test/"),
+ file("test/bar.txt", "bbb"),
+ ), allowedPrefix[0])),
+ // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO
+ checkVerifyBrokenContentFail("foo.txt"),
+ },
+ },
+ {
+ name: "big-files",
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
+ file("foo.txt", "a"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ {
+ name: "with-non-regfiles",
+ minChunkSize: []int{0, 64000},
+ tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ regDigest(t, "bar/foo2.txt", "b", dgstMap),
+ regDigest(t, "foo3.txt", "c", dgstMap),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ dir("test2/"),
+ link("test2/bazlink", "foo.txt"), // modified
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+ // Get original tar file and chunk digests
+ dgstMap := make(map[string]digest.Digest)
+ tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+ cl := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ tocDigest := rc.TOCDigest()
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ newStargz := buf.Bytes()
+						// NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+ dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+ for _, check := range tt.checks {
+ check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ digestMapTOC, err := listDigests(io.NewSectionReader(
+ bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ controller,
+ )
+ if err != nil {
+ t.Fatalf("failed to list digest: %v", err)
+ }
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+			// Check the digest of the TOC JSON based on the actual contents.
+			// The TOC JSON is guaranteed to exist in this archive because
+			// Open succeeded.
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+ t.Fatalf("failed to calculate digest of TOC JSON: %v",
+ err)
+ }
+ if dgstr.Digest() != tocDigest {
+ t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest())
+ }
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ // Get the original digest to make sure the file contents are kept unchanged
+ // from the original tar, during the whole conversion steps.
+ id := chunkID(h.Name, n, ce.ChunkSize)
+ want, ok := dgstMap[id]
+ if !ok {
+ t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+ h.Name, n, ce.ChunkSize, dgstMap)
+ return
+ }
+ found[id] = true
+
+ // Check the file contents
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if want != dgstr.Digest() {
+ t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+ h.Name, dgstr.Digest(), want)
+ return
+ }
+
+ // Check the digest stored in TOC JSON
+ dgstTOC, ok := digestMapTOC[ce.Offset]
+ if !ok {
+ t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+ h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+ }
+ if want != dgstTOC {
+ t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+ h.Name, dgstTOC, want)
+ return
+ }
+
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyTOC checks that verification works for the TOC JSON of the passed
+// stargz. It walks all entries in the stargz and checks that verification
+// succeeds for all chunks.
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Errorf("failed to verify stargz: %v", err)
+ return
+ }
+
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+ }
+
+ found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+ // Check the file contents
+ if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if !v.Verified() {
+ t.Errorf("Invalid contents in converted stargz %q (should be succeeded)",
+ h.Name)
+ return
+ }
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during verification and that the verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ funcs := map[string]rewriteFunc{
+		"lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var found bool
+ for _, e := range toc.Entries {
+ if cleanEntryName(e.Name) == filename {
+ if e.Type != "reg" && e.Type != "chunk" {
+ t.Fatalf("entry %q to break must be regfile or chunk", filename)
+ }
+ if e.ChunkDigest == "" {
+ t.Fatalf("entry %q is already invalid", filename)
+ }
+ e.ChunkDigest = ""
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("rewrite target not found")
+ }
+ },
+ "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var (
+ sampleEntry *TOCEntry
+ targetEntry *TOCEntry
+ )
+ for _, e := range toc.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if cleanEntryName(e.Name) == filename {
+ targetEntry = e
+ } else {
+ sampleEntry = e
+ }
+ }
+ }
+ if sampleEntry == nil {
+ t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ }
+ if targetEntry == nil {
+ t.Fatalf("rewrite target not found")
+ }
+ targetEntry.Offset = sampleEntry.Offset
+ },
+ }
+
+ for name, rFunc := range funcs {
+ t.Run(name, func(t *testing.T) {
+ newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, newSgz); err != nil {
+ t.Fatalf("failed to get converted stargz")
+ }
+ isgz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(newTocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ })
+ }
+ }
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ cl := newController()
+ rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ mStargz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(tocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ }
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
+func checkVerifyBrokenContentFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ // Parse stargz file
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify stargz: %v", err)
+ return
+ }
+
+ // Open the target file
+ sr, err := sgz.OpenFile(filename)
+ if err != nil {
+ t.Fatalf("failed to open file %q", filename)
+ }
+ ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+ if !ok {
+ t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+ return
+ }
+ if ce.ChunkSize == 0 {
+ t.Fatalf("file mustn't be empty")
+ return
+ }
+ data := make([]byte, ce.ChunkSize)
+ if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+ t.Errorf("failed to get data of a chunk of %q(offset=%q)",
+ filename, ce.ChunkOffset)
+ }
+
+ // Check the broken chunk (must fail)
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Fatalf("failed to get verifier for %q", filename)
+ }
+ broken := append([]byte{^data[0]}, data[1:]...)
+ if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ filename, ce.ChunkOffset, ce.ChunkSize)
+ }
+ if v.Verified() {
+ t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+ filename, data, broken)
+ }
+ }
+}
+
+func chunkID(name string, offset, size int64) string {
+ return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+ decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+ if err != nil {
+ t.Fatalf("failed to extract TOC JSON: %v", err)
+ }
+
+ rewrite(t, decodedJTOC, sgz)
+
+ tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+ if err != nil {
+ t.Fatalf("failed to create toc and footer: %v", err)
+ }
+
+ // Reconstruct stargz file with the modified TOC JSON
+ if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+ t.Fatalf("failed to reset the seek position of stargz: %v", err)
+ }
+ return io.MultiReader(
+ io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+ tocFooter, // Rewritten TOC and footer
+ ), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+ decodedJTOC, _, err := parseStargz(sgz, controller)
+ if err != nil {
+ return nil, err
+ }
+ digestMap := make(map[int64]digest.Digest)
+ for _, e := range decodedJTOC.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if e.Type == "reg" && e.Size == 0 {
+ continue // ignores empty file
+ }
+ if e.ChunkDigest == "" {
+ return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+ e.Name, e.Offset)
+ }
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestMap[e.Offset] = d
+ }
+ }
+ return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+ fSize := controller.FooterSize()
+ footer := make([]byte, fSize)
+ if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
+ return nil, 0, fmt.Errorf("error reading footer: %w", err)
+ }
+ _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
+ }
+
+ // Decode the TOC JSON
+ var tocReader io.Reader
+ if tocOffset >= 0 {
+ tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+ }
+ decodedJTOC, _, err = controller.ParseTOC(tocReader)
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
+ }
+ return decodedJTOC, tocOffset, nil
+}
+
+func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
+ const content = "Some contents"
+ invalidUtf8 := "\xff\xfe\xfd"
+
+ xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
+ sampleOwner := owner{uid: 50, gid: 100}
+
+ data64KB := randomContents(64000)
+
+ tests := []struct {
+ name string
+ chunkSize int
+ minChunkSize int
+ in []tarEntry
+ want []stargzCheck
+ wantNumGz int // expected number of streams
+
+ wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+ wantFailOnLossLess bool
+ wantTOCVersion int // default = 1
+ }{
+ {
+ name: "empty",
+ in: tarOf(),
+ wantNumGz: 2, // (empty tar) + TOC + footer
+ want: checks(
+ numTOCEntries(0),
+ ),
+ },
+ {
+ name: "1dir_1empty_file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", ""),
+ ),
+ wantNumGz: 3, // dir, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", 0),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileDigest("foo/bar.txt", digestFor("")),
+ ),
+ },
+ {
+ name: "1dir_1file",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/bar.txt", content, xAttrFile),
+ ),
+ wantNumGz: 4, // var dir, foo.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(2),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ hasFileDigest("foo/bar.txt", digestFor(content)),
+ hasFileContentsRange("foo/bar.txt", 0, content),
+ hasFileContentsRange("foo/bar.txt", 1, content[1:]),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasFileXattrs("foo/bar.txt", "foo", "bar"),
+ hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8),
+ ),
+ },
+ {
+ name: "2meta_2file",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ },
+ {
+ name: "3dir",
+ in: tarOf(
+ dir("bar/"),
+ dir("foo/"),
+ dir("foo/bar/"),
+ ),
+ wantNumGz: 3, // 3 dirs, TOC, footer
+ want: checks(
+ hasDirLinkCount("bar/", 2),
+ hasDirLinkCount("foo/", 3),
+ hasDirLinkCount("foo/bar/", 2),
+ ),
+ },
+ {
+ name: "symlink",
+ in: tarOf(
+ dir("foo/"),
+ symlink("foo/bar", "../../x"),
+ ),
+ wantNumGz: 3, // metas + TOC + footer
+ want: checks(
+ numTOCEntries(2),
+ hasSymlink("foo/bar", "../../x"),
+ entryHasChildren("", "foo"),
+ entryHasChildren("foo", "bar"),
+ ),
+ },
+ {
+ name: "chunked_file",
+ chunkSize: 4,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
+ ),
+ wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
+ want: checks(
+ numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
+ hasDir("foo/"),
+ hasFileLen("foo/big.txt", len("This is such a big file")),
+ hasFileDigest("foo/big.txt", digestFor("This is such a big file")),
+ hasFileContentsRange("foo/big.txt", 0, "This is such a big file"),
+ hasFileContentsRange("foo/big.txt", 1, "his is such a big file"),
+ hasFileContentsRange("foo/big.txt", 2, "is is such a big file"),
+ hasFileContentsRange("foo/big.txt", 3, "s is such a big file"),
+ hasFileContentsRange("foo/big.txt", 4, " is such a big file"),
+ hasFileContentsRange("foo/big.txt", 5, "is such a big file"),
+ hasFileContentsRange("foo/big.txt", 6, "s such a big file"),
+ hasFileContentsRange("foo/big.txt", 7, " such a big file"),
+ hasFileContentsRange("foo/big.txt", 8, "such a big file"),
+ hasFileContentsRange("foo/big.txt", 9, "uch a big file"),
+ hasFileContentsRange("foo/big.txt", 10, "ch a big file"),
+ hasFileContentsRange("foo/big.txt", 11, "h a big file"),
+ hasFileContentsRange("foo/big.txt", 12, " a big file"),
+ hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""),
+ hasChunkEntries("foo/big.txt", 6),
+ ),
+ },
+ {
+ name: "recursive",
+ in: tarOf(
+ dir("/", sampleOwner),
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ ),
+ wantNumGz: 4, // dirs, bar.txt alone, TOC, footer
+ want: checks(
+ maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt"
+ ),
+ },
+ {
+ name: "block_char_fifo",
+ in: tarOf(
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "b",
+ Typeflag: tar.TypeBlock,
+ Devmajor: 123,
+ Devminor: 456,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "c",
+ Typeflag: tar.TypeChar,
+ Devmajor: 111,
+ Devminor: 222,
+ Format: format,
+ })
+ }),
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Name: prefix + "f",
+ Typeflag: tar.TypeFifo,
+ Format: format,
+ })
+ }),
+ ),
+ wantNumGz: 3,
+ want: checks(
+ lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}),
+ lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}),
+ lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}),
+ ),
+ },
+ {
+ name: "modes",
+ in: tarOf(
+ dir("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ file("foo1/bar1", content, 0700|os.ModeSetuid),
+ file("foo1/bar2", content, 0755|os.ModeSetgid),
+ dir("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ file("foo2/bar3", content, 0755|os.ModeSticky),
+ dir("foo3/", 0755|os.ModeDir),
+ file("foo3/bar4", content, os.FileMode(0700)),
+ file("foo3/bar5", content, os.FileMode(0755)),
+ ),
+ wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer
+ want: checks(
+ hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid),
+ hasMode("foo1/bar1", 0700|os.ModeSetuid),
+ hasMode("foo1/bar2", 0755|os.ModeSetgid),
+ hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky),
+ hasMode("foo2/bar3", 0755|os.ModeSticky),
+ hasMode("foo3/", 0755|os.ModeDir),
+ hasMode("foo3/bar4", os.FileMode(0700)),
+ hasMode("foo3/bar5", os.FileMode(0755)),
+ ),
+ },
+ {
+ name: "lossy",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+ ),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ {
+ name: "hardlink should be replaced to the destination entry",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", "test"),
+ link("foolink", "foo/foo1"),
+ ),
+ wantNumGz: 4, // dir, foo1 + link, TOC, footer
+ want: checks(
+ mustSameEntry("foo/foo1", "foolink"),
+ ),
+ },
+ {
+ name: "several_files_in_chunk",
+ minChunkSize: 8000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ file("foo22", "ccc"),
+ dir("bar/"),
+ file("bar/bar.txt", "aaa"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+ wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo22", len("ccc")),
+ hasFileLen("bar/bar.txt", len("aaa")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo22", digestFor("ccc")),
+ hasFileDigest("bar/bar.txt", digestFor("aaa")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo22", 0, "ccc"),
+ hasFileContentsRange("foo22", 1, "cc"),
+ hasFileContentsRange("foo22", 2, "c"),
+ hasFileContentsRange("bar/bar.txt", 0, "aaa"),
+ hasFileContentsRange("bar/bar.txt", 1, "aa"),
+ hasFileContentsRange("bar/bar.txt", 2, "a"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ {
+ name: "several_files_in_chunk_chunked",
+ minChunkSize: 8000,
+ chunkSize: 32000,
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", data64KB),
+ file("foo2", "bb"),
+ dir("bar/"),
+ file("foo3", data64KB),
+ ),
+ // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+ wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+ want: checks(
+ numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
+ hasDir("foo/"),
+ hasDir("bar/"),
+ hasFileLen("foo/foo1", len(data64KB)),
+ hasFileLen("foo2", len("bb")),
+ hasFileLen("foo3", len(data64KB)),
+ hasFileDigest("foo/foo1", digestFor(data64KB)),
+ hasFileDigest("foo2", digestFor("bb")),
+ hasFileDigest("foo3", digestFor(data64KB)),
+ hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
+ hasFileContentsRange("foo/foo1", 0, data64KB),
+ hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
+ hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ hasFileContentsRange("foo2", 0, "bb"),
+ hasFileContentsRange("foo2", 1, "b"),
+ hasFileContentsRange("foo3", 0, data64KB),
+ hasFileContentsRange("foo3", 1, data64KB[1:]),
+ hasFileContentsRange("foo3", 2, data64KB[2:]),
+ hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+ hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+ ),
+ },
+ }
+
+ for _, tt := range tests {
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ cl1 := newCL()
+ w := NewWriterWithCompressor(&stargzBuf, cl1)
+ w.ChunkSize = tt.chunkSize
+ w.MinChunkSize = tt.minChunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+ t.Fatalf("Append wanted to fail on lossless")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+								// Check whether the result blob preserves the original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
+
+ diffID := w.DiffID()
+ wantDiffID := cl1.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ telemetry, checkCalled := newCalledTelemetry()
+ sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+ r, err := Open(
+ sr,
+ WithDecompressors(cl1),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ wantTOCVersion := 1
+ if tt.wantTOCVersion > 0 {
+ wantTOCVersion = tt.wantTOCVersion
+ }
+ if r.toc.Version != wantTOCVersion {
+ t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
+ }
+
+ footerSize := cl1.FooterSize()
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ t.Errorf("failed to read footer: %v", err)
+ }
+ _, tocOffset, _, err := cl1.ParseFooter(footer)
+ if err != nil {
+ t.Errorf("failed to parse footer: %v", err)
+ }
+ if err := checkCalled(tocOffset >= 0); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
+ streamOffsets := []int64{0}
+ prevOffset := int64(-1)
+ streams := 0
+ for _, e := range r.toc.Entries {
+ if e.Offset > prevOffset {
+ streamOffsets = append(streamOffsets, e.Offset)
+ prevOffset = e.Offset
+ streams++
+ }
+ }
+ streams++ // TOC
+ if tocOffset >= 0 {
+ // toc is in the blob
+ streamOffsets = append(streamOffsets, tocOffset)
+ }
+ streams++ // footer
+ streamOffsets = append(streamOffsets, footerOffset)
+ if streams != wantNumGz {
+ t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
+ }
+
+ t.Logf("testing streams: %+v", streamOffsets)
+ cl1.TestStreams(t, b, streamOffsets)
+
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+type chunkInfo struct {
+ name string
+ data string
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
+ var getFooterLatencyCalled bool
+ var getTocLatencyCalled bool
+ var deserializeTocLatencyCalled bool
+ return &Telemetry{
+ func(time.Time) { getFooterLatencyCalled = true },
+ func(time.Time) { getTocLatencyCalled = true },
+ func(time.Time) { deserializeTocLatencyCalled = true },
+ }, func(needsGetTOC bool) error {
+ var allErr []error
+ if !getFooterLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ }
+ if needsGetTOC {
+ if !getTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
+ }
+ }
+ if !deserializeTocLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
+ }
+ return errorutil.Aggregate(allErr)
+ }
+}
+
+func digestFor(content string) string {
+ sum := sha256.Sum256([]byte(content))
+ return fmt.Sprintf("sha256:%x", sum)
+}
+
+type numTOCEntries int
+
+func (n numTOCEntries) check(t *testing.T, r *Reader) {
+ if r.toc == nil {
+ t.Fatal("nil TOC")
+ }
+ if got, want := len(r.toc.Entries), int(n); got != want {
+ t.Errorf("got %d TOC entries; want %d", got, want)
+ }
+ t.Logf("got TOC entries:")
+ for i, ent := range r.toc.Entries {
+ entj, _ := json.Marshal(ent)
+ t.Logf(" [%d]: %s\n", i, entj)
+ }
+ if t.Failed() {
+ t.FailNow()
+ }
+}
+
+func checks(s ...stargzCheck) []stargzCheck { return s }
+
+type stargzCheck interface {
+ check(t *testing.T, r *Reader)
+}
+
+type stargzCheckFn func(*testing.T, *Reader)
+
+func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
+
+func maxDepth(max int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup("")
+ if !ok {
+ t.Fatal("root directory not found")
+ }
+ d, err := getMaxDepth(t, e, 0, 10*max)
+ if err != nil {
+ t.Errorf("failed to get max depth (wanted %d): %v", max, err)
+ return
+ }
+ if d != max {
+ t.Errorf("invalid depth %d; want %d", d, max)
+ return
+ }
+ })
+}
+
+func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
+ if current > limit {
+ return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
+ current, limit)
+ }
+ max = current
+ e.ForeachChild(func(baseName string, ent *TOCEntry) bool {
+ t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name)
+ d, err := getMaxDepth(t, ent, current+1, limit)
+ if err != nil {
+ rErr = err
+ return false
+ }
+ if d > max {
+ max = d
+ }
+ return true
+ })
+ return
+}
+
+func hasFileLen(file string, wantLen int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ } else if ent.Size != int64(wantLen) {
+ t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen)
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileXattrs(file, name, value string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "reg" {
+ t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type)
+ }
+ if ent.Xattrs == nil {
+ t.Errorf("file %q has no xattrs", file)
+ return
+ }
+ valueFound, found := ent.Xattrs[name]
+ if !found {
+ t.Errorf("file %q has no xattr %q", file, name)
+ return
+ }
+ if string(valueFound) != value {
+ t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value)
+ }
+
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasFileDigest(file string, digest string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for file %q", file)
+ }
+ if ent.Digest != digest {
+ t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest)
+ }
+ })
+}
+
+func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ extraMap := make(map[string]chunkInfo)
+ for _, e := range extra {
+ extraMap[e.name] = e
+ }
+ var extraNames []string
+ for n := range extraMap {
+ extraNames = append(extraNames, n)
+ }
+ f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
+ t.Logf("On %q: got preread of %q", file, e.Name)
+ ex, ok := extraMap[e.Name]
+ if !ok {
+ t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
+ }
+ got, err := io.ReadAll(cr)
+ if err != nil {
+ t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
+ }
+ if ex.data != string(got) {
+ t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
+ }
+ delete(extraMap, e.Name)
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ if len(extraMap) != 0 {
+ var exNames []string
+ for _, ex := range extraMap {
+ exNames = append(exNames, ex.name)
+ }
+ t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
+ }
+ })
+}
+
+func hasFileContentsRange(file string, offset int, want string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ f, err := r.OpenFile(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := make([]byte, len(want))
+ n, err := f.ReadAt(got, int64(offset))
+ if err != nil {
+ t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
+ }
+ if string(got) != want {
+ t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+ }
+ })
+}
+
+func hasChunkEntries(file string, wantChunks int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(file)
+ if !ok {
+ t.Fatalf("no file for %q", file)
+ }
+ if ent.Type != "reg" {
+ t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type)
+ }
+ chunks := r.getChunks(ent)
+ if len(chunks) != wantChunks {
+ t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks)
+ return
+ }
+ f := chunks[0]
+
+ var gotChunks []*TOCEntry
+ var last *TOCEntry
+ for off := int64(0); off < f.Size; off++ {
+ e, ok := r.ChunkEntryForOffset(file, off)
+ if !ok {
+ t.Errorf("no ChunkEntryForOffset at %d", off)
+ return
+ }
+ if last != e {
+ gotChunks = append(gotChunks, e)
+ last = e
+ }
+ }
+ if !reflect.DeepEqual(chunks, gotChunks) {
+ t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks)
+ }
+
+ // And verify the NextOffset
+ for i := 0; i < len(gotChunks)-1; i++ {
+ ci := gotChunks[i]
+ cnext := gotChunks[i+1]
+ if ci.NextOffset() != cnext.Offset {
+ t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset)
+ }
+ }
+ })
+}
+
+func entryHasChildren(dir string, want ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ want := append([]string(nil), want...)
+ var got []string
+ ent, ok := r.Lookup(dir)
+ if !ok {
+ t.Fatalf("didn't find TOCEntry for dir node %q", dir)
+ }
+ for baseName := range ent.children {
+ got = append(got, baseName)
+ }
+ sort.Strings(got)
+ sort.Strings(want)
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("children of %q = %q; want %q", dir, got, want)
+ }
+ })
+}
+
+func hasDir(file string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasDirLinkCount(file string, count int) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Type != "dir" {
+ t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type)
+ return
+ }
+ if ent.NumLink != count {
+ t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count)
+ }
+ return
+ }
+ }
+ t.Errorf("directory %q not found", file)
+ })
+}
+
+func hasMode(file string, mode os.FileMode) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == cleanEntryName(file) {
+ if ent.Stat().Mode() != mode {
+ t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode)
+ return
+ }
+ return
+ }
+ }
+ t.Errorf("file %q not found", file)
+ })
+}
+
+func hasSymlink(file, target string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ for _, ent := range r.toc.Entries {
+ if ent.Name == file {
+ if ent.Type != "symlink" {
+ t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type)
+ } else if ent.LinkName != target {
+ t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target)
+ }
+ return
+ }
+ }
+ t.Errorf("symlink %q not found", file)
+ })
+}
+
+func lookupMatch(name string, want *TOCEntry) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ e, ok := r.Lookup(name)
+ if !ok {
+ t.Fatalf("failed to Lookup entry %q", name)
+ }
+ if !reflect.DeepEqual(e, want) {
+ t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want)
+ }
+
+ })
+}
+
+func hasEntryOwner(entry string, owner owner) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
+ if !ok {
+ t.Errorf("entry %q not found", entry)
+ return
+ }
+ if ent.UID != owner.uid || ent.GID != owner.gid {
+ t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+ return
+ }
+ })
+}
+
+func mustSameEntry(files ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ var first *TOCEntry
+ for _, f := range files {
+ if first == nil {
+ var ok bool
+ first, ok = r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown first file on Lookup: %q", f)
+ return
+ }
+ }
+
+ // Test Lookup
+ e, ok := r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown file on Lookup: %q", f)
+ return
+ }
+ if e != first {
+ t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test LookupChild
+ pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get parent of %q", f)
+ return
+ }
+ e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get %q as the child of %+v", f, pe)
+ return
+ }
+ if e != first {
+ t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test ForeachChild
+ pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+ if baseName == filepath.Base(filepath.Clean(f)) {
+ if e != first {
+ t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return false
+ }
+ }
+ return true
+ })
+ }
+ })
+}
+
+func viewContent(c []byte) string {
+ if len(c) < 100 {
+ return string(c)
+ }
+ return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+ appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+ return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+ format := tar.FormatUnknown
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case tar.Format:
+ format = v
+ default:
+ panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+ }
+ }
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, ent := range ents {
+ if err := ent.appendTar(tw, prefix, format); err != nil {
+ t.Fatalf("building input tar: %v", err)
+ }
+ }
+ if err := tw.Close(); err != nil {
+ t.Errorf("closing write of input tar: %v", err)
+ }
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless mode preserves them
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var o owner
+ mode := os.FileMode(0755)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if !strings.HasSuffix(name, "/") {
+ panic(fmt.Sprintf("missing trailing slash in dir %q ", name))
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeDir,
+ Name: prefix + name,
+ Mode: tm,
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ })
+ })
+}
+
+// xAttr is a set of extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories created with the file and dir functions.
+type owner struct {
+ uid int
+ gid int
+}
+
+func file(name, contents string, opts ...interface{}) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ var xattrs xAttr
+ var o owner
+ mode := os.FileMode(0644)
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case xAttr:
+ xattrs = v
+ case owner:
+ o = v
+ case os.FileMode:
+ mode = v
+ default:
+ return errors.New("unsupported opt")
+ }
+ }
+ if strings.HasSuffix(name, "/") {
+ return fmt.Errorf("bogus trailing slash in file %q", name)
+ }
+ tm, err := fileModeToTarMode(mode)
+ if err != nil {
+ return err
+ }
+ if len(xattrs) > 0 {
+ format = tar.FormatPAX // only PAX supports xattrs
+ }
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Mode: tm,
+ Xattrs: xattrs,
+ Size: int64(len(contents)),
+ Uid: o.uid,
+ Gid: o.gid,
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ _, err = io.WriteString(tw, contents)
+ return err
+ })
+}
+
+func symlink(name, target string) tarEntry {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+ return tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeSymlink,
+ Name: prefix + name,
+ Linkname: target,
+ Mode: 0644,
+ Format: format,
+ })
+ })
+}
+
+func link(name string, linkname string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeLink,
+ Name: prefix + name,
+ Linkname: linkname,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func chardev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeChar,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func blockdev(name string, major, minor int64) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeBlock,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func fifo(name string) tarEntry {
+ now := time.Now()
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ return w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeFifo,
+ Name: prefix + name,
+ ModTime: now,
+ Format: format,
+ })
+ })
+}
+
+func prefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func noPrefetchLandmark() tarEntry {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ contents := []byte{landmarkContents}
+ if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
+ if digestMap == nil {
+ t.Fatalf("digest map mustn't be nil")
+ }
+ content := []byte(contentStr)
+
+ var n int64
+ for n < int64(len(content)) {
+ size := int64(chunkSize)
+ remain := int64(len(content)) - n
+ if remain < size {
+ size = remain
+ }
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil {
+ t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)",
+ string(content[n:n+size]), name, n, size)
+ }
+ digestMap[chunkID(name, n, size)] = dgstr.Digest()
+ n += size
+ }
+
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+ if err := w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: prefix + name,
+ Size: int64(len(content)),
+ Format: format,
+ }); err != nil {
+ return err
+ }
+ if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+func randomContents(n int) string {
+ b := make([]rune, n)
+ for i := range b {
+ b[i] = runes[rand.Intn(len(runes))]
+ }
+ return string(b)
+}
+
+func fileModeToTarMode(mode os.FileMode) (int64, error) {
+ h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
+ if err != nil {
+ return 0, err
+ }
+ return h.Mode, nil
+}
+
+// fileInfoOnlyMode is an os.FileMode-based os.FileInfo that populates only the file mode.
+type fileInfoOnlyMode os.FileMode
+
+func (f fileInfoOnlyMode) Name() string { return "" }
+func (f fileInfoOnlyMode) Size() int64 { return 0 }
+func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
+func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
+func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
+func (f fileInfoOnlyMode) Sys() interface{} { return nil }
+
+func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+ if len(streams) == 0 {
+ return // nop
+ }
+
+ wants := map[int64]struct{}{}
+ for _, s := range streams {
+ wants[s] = struct{}{}
+ }
+
+ len0 := len(b)
+ br := bytes.NewReader(b)
+ zr := new(gzip.Reader)
+ t.Logf("got gzip streams:")
+ numStreams := 0
+ for {
+ zoff := len0 - br.Len()
+ if err := zr.Reset(br); err != nil {
+ if err == io.EOF {
+ return
+ }
+ t.Fatalf("countStreams(gzip), Reset: %v", err)
+ }
+ zr.Multistream(false)
+ n, err := io.Copy(io.Discard, zr)
+ if err != nil {
+ t.Fatalf("countStreams(gzip), Copy: %v", err)
+ }
+ var extra string
+ if len(zr.Header.Extra) > 0 {
+ extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+ }
+ t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
+ delete(wants, int64(zoff))
+ numStreams++
+ }
+}
+
+func GzipDiffIDOf(t *testing.T, b []byte) string {
+ h := sha256.New()
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("diffIDOf(gzip): %v", err)
+ }
+ defer zr.Close()
+ if _, err := io.Copy(h, zr); err != nil {
+ t.Fatalf("diffIDOf(gzip).Copy: %v", err)
+ }
+ return fmt.Sprintf("sha256:%x", h.Sum(nil))
+}
diff --git a/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
new file mode 100644
index 000000000..57e0aa614
--- /dev/null
+++ b/testdata/push/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -0,0 +1,342 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "hash"
+ "io"
+ "os"
+ "path"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ // TOCTarName is the name of the JSON file in the tar archive in the
+ // table of contents gzip stream.
+ TOCTarName = "stargz.index.json"
+
+ // FooterSize is the number of bytes in the footer
+ //
+ // The footer is an empty gzip stream with no compression and an Extra
+ // header of the form "%016xSTARGZ", where the 64 bit hex-encoded
+ // number is the offset to the gzip stream of JSON TOC.
+ //
+ // 51 comes from:
+ //
+ // 10 bytes gzip header
+ // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+ // 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+ // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+ // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+ // 5 bytes flate header
+ // 8 bytes gzip footer
+ // (End of the eStargz blob)
+ //
+ // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
+ FooterSize = 51
+
+ // legacyFooterSize is the number of bytes in the legacy stargz footer.
+ //
+ // 47 comes from:
+ //
+ // 10 byte gzip header +
+ // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
+ // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
+ // 5 byte flate header
+ // 8 byte gzip footer (two little endian uint32s: digest, size)
+ legacyFooterSize = 47
+
+ // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
+ // digest of the TOC JSON.
+ // This annotation is valid only when it is specified in `.[]layers.annotations`
+ // of an image manifest.
+ TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+	// StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
+	// pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
+	// to the runtime, but the current OCI image spec doesn't ship this information by default, so we
+	// store it in this special annotation.
+ StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
+
+ // PrefetchLandmark is a file entry which indicates the end position of
+ // prefetch in the stargz file.
+ PrefetchLandmark = ".prefetch.landmark"
+
+ // NoPrefetchLandmark is a file entry which indicates that no prefetch should
+ // occur in the stargz file.
+ NoPrefetchLandmark = ".no.prefetch.landmark"
+
+ landmarkContents = 0xf
+)
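+
+// For example, a TOC gzip stream starting at offset 0x1234 in the blob is recorded in the
+// footer's Extra subfield as fmt.Sprintf("%016xSTARGZ", 0x1234), i.e. the 22 bytes
+// "0000000000001234STARGZ"; readers recover the TOC offset by locating the 'S', 'G' subfield
+// in the footer's gzip Extra header and parsing its leading 16 hex digits.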
+
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
+ Version int `json:"version"`
+ Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+ // Name is the tar entry's name. It is the complete path
+ // stored in the tar file, not just the base name.
+ Name string `json:"name"`
+
+ // Type is one of "dir", "reg", "symlink", "hardlink", "char",
+ // "block", "fifo", or "chunk".
+ // The "chunk" type is used for regular file data chunks past the first
+ // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+ // ChunkOffset, and ChunkSize populated.
+ Type string `json:"type"`
+
+ // Size, for regular files, is the logical size of the file.
+ Size int64 `json:"size,omitempty"`
+
+ // ModTime3339 is the modification time of the tar entry. Empty
+ // means zero or unknown. Otherwise it's in UTC RFC3339
+ // format. Use the ModTime method to access the time.Time value.
+ ModTime3339 string `json:"modtime,omitempty"`
+ modTime time.Time
+
+ // LinkName, for symlinks and hardlinks, is the link target.
+ LinkName string `json:"linkName,omitempty"`
+
+ // Mode is the permission and mode bits.
+ Mode int64 `json:"mode,omitempty"`
+
+ // UID is the user ID of the owner.
+ UID int `json:"uid,omitempty"`
+
+ // GID is the group ID of the owner.
+ GID int `json:"gid,omitempty"`
+
+ // Uname is the username of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same UID.
+ Uname string `json:"userName,omitempty"`
+
+ // Gname is the group name of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same GID.
+ Gname string `json:"groupName,omitempty"`
+
+ // Offset, for regular files, provides the offset in the
+ // stargz file to the file's data bytes. See ChunkOffset and
+ // ChunkSize.
+ Offset int64 `json:"offset,omitempty"`
+
+	// InnerOffset is an optional field that indicates the uncompressed offset
+	// of this "reg" or "chunk" payload within the stream that starts at Offset.
+	// This field makes it possible to put multiple "reg" or "chunk" payloads
+	// in one chunk, sharing the same Offset but with different InnerOffset values.
+ InnerOffset int64 `json:"innerOffset,omitempty"`
+
+ nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+ // DevMajor is the major device number for "char" and "block" types.
+ DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+ DevMinor int `json:"devMinor,omitempty"`
+
+ // NumLink is the number of entry names pointing to this entry.
+ // Zero means one name references this entry.
+ // This field is calculated during runtime and not recorded in TOC JSON.
+ NumLink int `json:"-"`
+
+	// Xattrs are the extended attributes for the entry.
+ Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for regular file payloads.
+ // It has the form "sha256:abcdef01234....".
+ Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value when Type is either "reg" or
+	// "chunk".
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+
+ // ChunkDigest stores an OCI digest of the chunk. This must be formed
+ // as "sha256:0123abcd...".
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+
+ children map[string]*TOCEntry
+
+ // chunkTopIndex is index of the entry where Offset starts in the blob.
+ chunkTopIndex int
+}
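+
+// As an illustration, a 6-byte regular file written with a 4-byte chunk size is described
+// by two entries: a "reg" entry with Size=6, ChunkOffset=0 and ChunkSize=4, followed by a
+// "chunk" entry with ChunkOffset=4 (whose serialized "chunkSize" may be zero, meaning "to
+// the end of the file"); each entry carries its own Offset and ChunkDigest.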
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+ if e.children == nil {
+ e.children = make(map[string]*TOCEntry)
+ }
+ if child.Type == "dir" {
+ e.NumLink++ // Entry ".." in the subdirectory links to this directory
+ }
+ e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+ for name, ent := range e.children {
+ if !f(name, ent) {
+ return
+ }
+ }
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+ child, ok = e.children[baseName]
+ return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64 { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{} { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode so we can interpret these bits using the `tar` package.
+ m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+ (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+ switch fi.e.Type {
+ case "dir":
+ m |= os.ModeDir
+ case "symlink":
+ m |= os.ModeSymlink
+ case "char":
+ m |= os.ModeDevice | os.ModeCharDevice
+ case "block":
+ m |= os.ModeDevice
+ case "fifo":
+ m |= os.ModeNamedPipe
+ }
+ return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+ // Verifier provides a content verifier that can be used for verifying the
+ // contents of the specified TOCEntry.
+ Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+ Compressor
+ Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns a WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	//
+	// The returned writer should implement a "Flush() error" method that flushes
+	// any pending compressed data to the underlying writer.
+ Writer(w io.Writer) (WriteFlushCloser, error)
+
+	// WriteTOCAndFooter is called to write the JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects the DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst, the digest of the TOC, which will be used
+	// to verify this blob when it's parsed.
+ WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
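+
+// A Compressor is thus driven roughly as follows: the writer obtains a WriteFlushCloser
+// from Writer for each chunk, writes the chunk and closes it, and finally calls
+// WriteTOCAndFooter once with the accumulated JTOC and the hash used for the DiffID
+// calculation.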
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+ // Reader returns ReadCloser to be used for decompressing file payload.
+ Reader(r io.Reader) (io.ReadCloser, error)
+
+ // FooterSize returns the size of the footer of this blob.
+ FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+	// blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from the
+	// top of the blob until the TOC JSON).
+	//
+	// If tocOffset < 0, we assume that the TOC isn't contained in the blob; a nil reader is
+	// then passed to ParseTOC, which is expected to acquire the TOC from an external location
+	// and return it.
+	//
+	// tocSize is optional. If tocSize <= 0, it defaults to the size of the range from tocOffset
+	// until the beginning of the footer (blob size - tocOffset - FooterSize).
+	// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses the TOC from the passed reader. The reader provides the partial
+	// contents of the underlying blob in the range specified by the ParseFooter method.
+	//
+	// This function returns tocDgst, the digest of the TOC, which will be used to verify
+	// this blob. It must match the value returned by Compressor.WriteTOCAndFooter when
+	// this blob was created.
+	//
+	// If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained
+	// in the blob. A nil reader is then passed to ParseTOC, which is expected to acquire the
+	// TOC from an external location and return it.
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
+
+type WriteFlushCloser interface {
+ io.WriteCloser
+ Flush() error
+}
diff --git a/testdata/push/vendor/github.com/docker/cli/AUTHORS b/testdata/push/vendor/github.com/docker/cli/AUTHORS
new file mode 100644
index 000000000..ad1abd496
--- /dev/null
+++ b/testdata/push/vendor/github.com/docker/cli/AUTHORS
@@ -0,0 +1,910 @@
+# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT.
+# This file lists all contributors to the repository.
+# See scripts/docs/generate-authors.sh to make modifications.
+
+A. Lester Buck III
+Aanand Prasad
+Aaron L. Xu
+Aaron Lehmann
+Aaron.L.Xu
+Abdur Rehman
+Abhinandan Prativadi
+Abin Shahab
+Abreto FU
+Ace Tang
+Addam Hardy
+Adolfo Ochagavía
+Adrian Plata
+Adrien Duermael
+Adrien Folie
+Adyanth Hosavalike
+Ahmet Alp Balkan
+Aidan Feldman
+Aidan Hobson Sayers
+AJ Bowen
+Akhil Mohan
+Akihiro Suda
+Akim Demaille
+Alan Thompson
+Alano Terblanche
+Albert Callarisa
+Alberto Roura
+Albin Kerouanton
+Aleksa Sarai
+Aleksander Piotrowski
+Alessandro Boch
+Alex Couture-Beil
+Alex Mavrogiannis
+Alex Mayer
+Alexander Boyd
+Alexander Chneerov
+Alexander Larsson
+Alexander Morozov
+Alexander Ryabov
+Alexandre González
+Alexey Igrychev
+Alexis Couvreur
+Alfred Landrum
+Ali Rostami
+Alicia Lauerman
+Allen Sun
+Alvin Deng
+Amen Belayneh
+Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com>
+Amir Goldstein
+Amit Krishnan
+Amit Shukla
+Amy Lindburg
+Anca Iordache
+Anda Xu
+Andrea Luzzardi
+Andreas Köhler
+Andres G. Aragoneses
+Andres Leon Rangel
+Andrew France
+Andrew Hsu
+Andrew Macpherson
+Andrew McDonnell
+Andrew Po
+Andrew-Zipperer
+Andrey Petrov
+Andrii Berehuliak
+André Martins
+Andy Goldstein
+Andy Rothfusz
+Anil Madhavapeddy
+Ankush Agarwal
+Anne Henmi
+Anton Polonskiy
+Antonio Murdaca
+Antonis Kalipetis
+Anusha Ragunathan
+Ao Li
+Arash Deshmeh
+Arko Dasgupta
+Arnaud Porterie
+Arnaud Rebillout
+Arthur Peka
+Ashly Mathew
+Ashwini Oruganti
+Aslam Ahemad
+Azat Khuyiyakhmetov
+Bardia Keyoumarsi
+Barnaby Gray
+Bastiaan Bakker
+BastianHofmann
+Ben Bodenmiller
+Ben Bonnefoy
+Ben Creasy
+Ben Firshman
+Benjamin Boudreau
+Benjamin Böhmke
+Benjamin Nater
+Benoit Sigoure
+Bhumika Bayani
+Bill Wang
+Bin Liu
+Bingshen Wang
+Bishal Das
+Bjorn Neergaard
+Boaz Shuster
+Boban Acimovic
+Bogdan Anton
+Boris Pruessmann
+Brad Baker
+Bradley Cicenas
+Brandon Mitchell
+Brandon Philips
+Brent Salisbury
+Bret Fisher
+Brian (bex) Exelbierd
+Brian Goff
+Brian Tracy
+Brian Wieder
+Bruno Sousa
+Bryan Bess
+Bryan Boreham
+Bryan Murphy
+bryfry
+Calvin Liu
+Cameron Spear
+Cao Weiwei
+Carlo Mion
+Carlos Alexandro Becker
+Carlos de Paula
+Casey Korver
+Ce Gao
+Cedric Davies
+Cezar Sa Espinola
+Chad Faragher
+Chao Wang
+Charles Chan
+Charles Law
+Charles Smith
+Charlie Drage
+Charlotte Mach
+ChaYoung You
+Chee Hau Lim
+Chen Chuanliang
+Chen Hanxiao
+Chen Mingjie
+Chen Qiu
+Chris Chinchilla
+Chris Couzens
+Chris Gavin
+Chris Gibson
+Chris McKinnel
+Chris Snow
+Chris Vermilion
+Chris Weyl
+Christian Persson
+Christian Stefanescu
+Christophe Robin
+Christophe Vidal
+Christopher Biscardi
+Christopher Crone
+Christopher Jones
+Christopher Petito <47751006+krissetto@users.noreply.github.com>
+Christopher Petito
+Christopher Svensson
+Christy Norman
+Chun Chen
+Clinton Kitson
+Coenraad Loubser
+Colin Hebert
+Collin Guarino
+Colm Hally
+Comical Derskeal <27731088+derskeal@users.noreply.github.com>
+Conner Crosby
+Corey Farrell
+Corey Quon
+Cory Bennet
+Cory Snider
+Craig Osterhout
+Craig Wilhite
+Cristian Staretu
+Daehyeok Mun
+Dafydd Crosby
+Daisuke Ito
+dalanlan
+Damien Nadé
+Dan Cotora
+Danial Gharib
+Daniel Artine
+Daniel Cassidy
+Daniel Dao
+Daniel Farrell
+Daniel Gasienica