diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md index 2849a555e0f8..be1a71589eb3 100644 --- a/Documentation/ceph-upgrade.md +++ b/Documentation/ceph-upgrade.md @@ -549,9 +549,9 @@ ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1" ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" -ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" -ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" -CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" +ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" +ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" +CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" ``` @@ -574,9 +574,9 @@ kubectl --namespace rook-ceph get pod -o jsonpath='{range .items[*]}{range .spec k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 -k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 -k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 +k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 +k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 quay.io/cephcsi/cephcsi:v3.5.1 -quay.io/csiaddons/volumereplication-operator:v0.1.0 +quay.io/csiaddons/volumereplication-operator:v0.3.0 quay.io/csiaddons/k8s-sidecar:v0.2.1 ``` diff --git a/Documentation/helm-operator.md b/Documentation/helm-operator.md index 0fcab3438c69..0ba6d395ec43 100644 --- a/Documentation/helm-operator.md +++ b/Documentation/helm-operator.md @@ -138,14 +138,14 @@ The following tables lists the configurable parameters of the rook-operator char | `csi.rbdPluginUpdateStrategy` | CSI Rbd plugin daemonset update strategy, 
supported values are OnDelete and RollingUpdate. | `OnDelete` | | `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | | `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0` | -| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.3.0` | +| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.4.0` | | `csi.provisioner.image` | Kubernetes CSI provisioner image. | `k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0` | -| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0` | +| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1` | | `csi.attacher.image` | Kubernetes CSI Attacher image. | `k8s.gcr.io/sig-storage/csi-attacher:v3.4.0` | | `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Pods. | | | `csi.rbdPodLabels` | Labels to add to the CSI RBD Pods. | | | `csi.volumeReplication.enabled` | Enable Volume Replication. | `false` | -| `csi.volumeReplication.image` | Volume Replication Controller image. | `quay.io/csiaddons/volumereplication-operator:v0.1.0` | +| `csi.volumeReplication.image` | Volume Replication Controller image. | `quay.io/csiaddons/volumereplication-operator:v0.3.0` | | `csi.csiAddons.enabled` | Enable CSIAddons | `false` | | `csi.csiAddons.image` | CSIAddons Sidecar image. | `quay.io/csiaddons/k8s-sidecar:v0.2.1` | | `admissionController.tolerations` | Array of tolerations in YAML format which will be added to admission controller deployment. 
| | diff --git a/deploy/charts/library/templates/_cluster-psp.tpl b/deploy/charts/library/templates/_cluster-psp.tpl index 1918f7774e86..2d735d1eed35 100644 --- a/deploy/charts/library/templates/_cluster-psp.tpl +++ b/deploy/charts/library/templates/_cluster-psp.tpl @@ -61,4 +61,4 @@ subjects: - kind: ServiceAccount name: rook-ceph-cmd-reporter namespace: {{ .Release.Namespace }} # namespace:cluster -{{- end -}} +{{- end }} diff --git a/deploy/charts/library/templates/_cluster-rolebinding.tpl b/deploy/charts/library/templates/_cluster-rolebinding.tpl index 4196165b2f7f..b9748d40120c 100644 --- a/deploy/charts/library/templates/_cluster-rolebinding.tpl +++ b/deploy/charts/library/templates/_cluster-rolebinding.tpl @@ -90,4 +90,4 @@ subjects: - kind: ServiceAccount name: rook-ceph-purge-osd namespace: {{ .Release.Namespace }} # namespace:cluster -{{- end -}} +{{- end }} diff --git a/deploy/charts/library/templates/_recommended-labels.tpl b/deploy/charts/library/templates/_recommended-labels.tpl index 906755c67d44..546e3b55c604 100644 --- a/deploy/charts/library/templates/_recommended-labels.tpl +++ b/deploy/charts/library/templates/_recommended-labels.tpl @@ -6,4 +6,4 @@ app.kubernetes.io/part-of: rook-ceph-operator app.kubernetes.io/managed-by: helm app.kubernetes.io/created-by: helm helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- end -}} +{{- end }} diff --git a/deploy/charts/library/templates/_suffix-cluster-namespace.tpl b/deploy/charts/library/templates/_suffix-cluster-namespace.tpl index 6957a910cde9..fdf679340d21 100644 --- a/deploy/charts/library/templates/_suffix-cluster-namespace.tpl +++ b/deploy/charts/library/templates/_suffix-cluster-namespace.tpl @@ -14,5 +14,5 @@ If the cluster namespace is different from the operator namespace, we want to na {{- $clusterNamespace := .Release.Namespace -}} {{- if ne $clusterNamespace $operatorNamespace -}} {{ printf "-%s" $clusterNamespace }} -{{- end -}} -{{- end -}} +{{- end }} +{{- end 
}} diff --git a/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl b/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl index 96b5fdbfd55a..a8a6fb5e7928 100644 --- a/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl +++ b/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl @@ -3,14 +3,14 @@ Define the clusterName as defaulting to the release namespace */}} {{- define "clusterName" -}} {{ .Values.clusterName | default .Release.Namespace }} -{{- end -}} +{{- end }} {{/* Return the target Kubernetes version. */}} {{- define "capabilities.kubeVersion" -}} {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} -{{- end -}} +{{- end }} {{/* Return the appropriate apiVersion for ingress. @@ -22,5 +22,5 @@ Return the appropriate apiVersion for ingress. {{- print "networking.k8s.io/v1beta1" -}} {{- else -}} {{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} +{{- end }} +{{- end }} diff --git a/deploy/charts/rook-ceph-cluster/templates/rbac.yaml b/deploy/charts/rook-ceph-cluster/templates/rbac.yaml index 7b04b373dece..1e3540572067 100644 --- a/deploy/charts/rook-ceph-cluster/templates/rbac.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/rbac.yaml @@ -14,7 +14,7 @@ clusterrolebindings {{- if .Values.pspEnable }} --- {{ include "library.cluster.psp.rolebindings" . }} -{{- end -}} +{{- end }} {{/* roles @@ -38,4 +38,4 @@ rolebindings {{ include "library.cluster.monitoring.rolebindings" . 
}} {{- end }} -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml b/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml index 704eb43f9d7b..646824a76424 100644 --- a/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml @@ -11,8 +11,15 @@ apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshotClass metadata: name: {{ $filesystemvsc.name }} +{{- if $filesystemvsc.labels }} + labels: +{{ toYaml $filesystemvsc.labels | indent 4 }} +{{- end }} annotations: snapshot.storage.kubernetes.io/is-default-class: "{{ if default false $filesystemvsc.isDefault }}true{{ else }}false{{ end }}" +{{- if $filesystemvsc.annotations }} +{{ toYaml $filesystemvsc.annotations | indent 4 }} +{{- end }} driver: {{ .Values.operatorNamespace }}.cephfs.csi.ceph.com parameters: clusterID: {{ .Release.Namespace }} @@ -34,8 +41,15 @@ apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshotClass metadata: name: {{ $blockpoolvsc.name }} +{{- if $blockpoolvsc.labels }} + labels: +{{ toYaml $blockpoolvsc.labels | indent 4 }} +{{- end }} annotations: snapshot.storage.kubernetes.io/is-default-class: "{{ if default false $blockpoolvsc.isDefault }}true{{ else }}false{{ end }}" +{{- if $blockpoolvsc.annotations }} +{{ toYaml $blockpoolvsc.annotations | indent 4 }} +{{- end }} driver: {{ .Values.operatorNamespace }}.rbd.csi.ceph.com parameters: clusterID: {{ .Release.Namespace }} diff --git a/deploy/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml index 68c27158bf13..fd3e8f270d9c 100644 --- a/deploy/charts/rook-ceph-cluster/values.yaml +++ b/deploy/charts/rook-ceph-cluster/values.yaml @@ -403,6 +403,8 @@ cephFileSystemVolumeSnapshotClass: name: ceph-filesystem isDefault: true deletionPolicy: Delete + annotations: {} + labels: {} # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#cephfs-snapshots 
for available configuration parameters: {} @@ -411,6 +413,8 @@ cephBlockPoolsVolumeSnapshotClass: name: ceph-block isDefault: false deletionPolicy: Delete + annotations: {} + labels: {} # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#rbd-snapshots for available configuration parameters: {} diff --git a/deploy/charts/rook-ceph/templates/cluster-rbac.yaml b/deploy/charts/rook-ceph/templates/cluster-rbac.yaml index 38a21a3512f2..06dcca401682 100644 --- a/deploy/charts/rook-ceph/templates/cluster-rbac.yaml +++ b/deploy/charts/rook-ceph/templates/cluster-rbac.yaml @@ -18,7 +18,7 @@ clusterrolebindings {{- if .Values.pspEnable }} --- {{ include "library.cluster.psp.rolebindings" . }} -{{- end -}} +{{- end }} {{/* roles diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index f76bbe8832a9..a61cd8a2440c 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -425,19 +425,19 @@ rules: verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -501,19 +501,19 @@ rules: verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: 
["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: [""] resources: ["persistentvolumeclaims/status"] verbs: ["update", "patch"] @@ -535,4 +535,4 @@ rules: - apiGroups: [""] resources: ["serviceaccounts"] verbs: ["get"] -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml index 7663797953a8..c99ec442cc8e 100644 --- a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml @@ -99,4 +99,4 @@ roleRef: kind: ClusterRole name: rbd-external-provisioner-runner apiGroup: rbac.authorization.k8s.io -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/psp.yaml b/deploy/charts/rook-ceph/templates/psp.yaml index 23c5a415ba07..920299873c62 100644 --- a/deploy/charts/rook-ceph/templates/psp.yaml +++ b/deploy/charts/rook-ceph/templates/psp.yaml @@ -168,4 +168,4 @@ subjects: name: rook-csi-rbd-provisioner-sa namespace: {{ .Release.Namespace }} # namespace:operator {{- end }} -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/rolebinding.yaml b/deploy/charts/rook-ceph/templates/rolebinding.yaml index 5be43f9caa09..5e5c4e6cebd6 100644 --- 
a/deploy/charts/rook-ceph/templates/rolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/rolebinding.yaml @@ -47,7 +47,7 @@ roleRef: name: rbd-csi-nodeplugin apiGroup: rbac.authorization.k8s.io --- -{{- end -}} +{{- end }} kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -61,4 +61,4 @@ roleRef: kind: Role name: rbd-external-provisioner-cfg apiGroup: rbac.authorization.k8s.io -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml index 1213e5cf8da0..19edea3d0b5b 100644 --- a/deploy/charts/rook-ceph/values.yaml +++ b/deploy/charts/rook-ceph/values.yaml @@ -282,11 +282,11 @@ csi: #provisioner: #image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 #snapshotter: - #image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + #image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 #attacher: #image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 #resizer: - #image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + #image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 # Labels to add to the CSI CephFS Deployments and DaemonSets Pods. #cephfsPodLabels: "key1=value1,key2=value2" # Labels to add to the CSI RBD Deployments and DaemonSets Pods. @@ -296,7 +296,7 @@ csi: # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring volumeReplication: enabled: false - #image: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + #image: "quay.io/csiaddons/volumereplication-operator:v0.3.0" # Enable the CSIAddons sidecar. 
csiAddons: enabled: false diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml index 254a03b2eb28..b560c9adc7fe 100644 --- a/deploy/examples/common.yaml +++ b/deploy/examples/common.yaml @@ -66,19 +66,19 @@ rules: verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -160,19 +160,19 @@ rules: verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: [""] resources: ["persistentvolumeclaims/status"] verbs: 
["update", "patch"] diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt index 1d566afa05f7..2e54b09490c2 100644 --- a/deploy/examples/images.txt +++ b/deploy/examples/images.txt @@ -1,10 +1,10 @@ k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 - k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 - k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 + k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 quay.io/ceph/ceph:v16.2.7 quay.io/cephcsi/cephcsi:v3.5.1 quay.io/csiaddons/k8s-sidecar:v0.2.1 - quay.io/csiaddons/volumereplication-operator:v0.1.0 + quay.io/csiaddons/volumereplication-operator:v0.3.0 rook/ceph:v1.8.3 diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index 5f715576902f..4627006e5dff 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -163,9 +163,9 @@ data: # these images to the desired release of the CSI driver. # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1" # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" # (Optional) set user created priorityclassName for csi plugin pods. @@ -417,7 +417,7 @@ data: CSI_ENABLE_VOLUME_REPLICATION: "false" # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. 
ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" - # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" # Enable the csi addons sidecar. CSI_ENABLE_CSIADDONS: "false" # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index c21aef1c50dc..b0092c9996ff 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -81,9 +81,9 @@ data: # these images to the desired release of the CSI driver. # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1" # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" # (Optional) set user created priorityclassName for csi plugin pods. @@ -335,7 +335,7 @@ data: # Before enabling, ensure the Volume Replication CRDs are created. # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring CSI_ENABLE_VOLUME_REPLICATION: "false" - # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" # Enable the csi addons sidecar. 
CSI_ENABLE_CSIADDONS: "false" # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" diff --git a/deploy/examples/osd-env-override.yaml b/deploy/examples/osd-env-override.yaml new file mode 100644 index 000000000000..454ccc50a5ed --- /dev/null +++ b/deploy/examples/osd-env-override.yaml @@ -0,0 +1,19 @@ +# ############################################################################################################### +# The `rook-ceph-osd-env-override` ConfigMap is a development feature +# that allows injecting arbitrary environment variables into OSD-related +# containers created by the operator. +# ############################################################################################################### + +apiVersion: v1 +kind: ConfigMap +metadata: + name: rook-ceph-osd-env-override + namespace: rook-ceph +data: + # Bypass ASan's assertion that it is the very first loaded DSO. + # This is necessary for crimson-osd as it's currently built with + # the ASan sanitizer turned on which means the `libasan.so` must + # be the very first loaded dynamic library. Unfortunately, this + # isn't fulfilled as the containers use `ld.preload`, so ASan was + # aborting the entire OSD. + ASAN_OPTIONS: verify_asan_link_order=0 diff --git a/pkg/daemon/ceph/client/config.go b/pkg/daemon/ceph/client/config.go index da2f41494131..1ace5a2f25a8 100644 --- a/pkg/daemon/ceph/client/config.go +++ b/pkg/daemon/ceph/client/config.go @@ -292,7 +292,7 @@ func WriteCephConfig(context *clusterd.Context, clusterInfo *ClusterInfo) error } dst, err := ioutil.ReadFile(DefaultConfigFilePath()) if err == nil { - logger.Debugf("config file @ %s: %s", DefaultConfigFilePath(), dst) + logger.Debugf("config file @ %s:\n%s", DefaultConfigFilePath(), dst) } else { logger.Warningf("wrote and copied config file but failed to read it back from %s for logging. 
%v", DefaultConfigFilePath(), err) } diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go index c4d024521cf1..345cebb1de59 100644 --- a/pkg/daemon/ceph/client/pool.go +++ b/pkg/daemon/ceph/client/pool.go @@ -116,6 +116,30 @@ func GetPoolNamesByID(context *clusterd.Context, clusterInfo *ClusterInfo) (map[ return names, nil } +func getPoolApplication(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) (string, error) { + args := []string{"osd", "pool", "application", "get", poolName} + appDetails, err := NewCephCommand(context, clusterInfo, args).Run() + if err != nil { + return "", errors.Wrapf(err, "failed to get current application for pool %s", poolName) + } + + if len(appDetails) == 0 { + // no application name + return "", nil + } + var application map[string]interface{} + err = json.Unmarshal([]byte(appDetails), &application) + if err != nil { + return "", errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(appDetails)) + } + for name := range application { + // Return the first application name in the list since only one is expected + return name, nil + } + // No application name assigned + return "", nil +} + // GetPoolDetails gets all the details of a given pool func GetPoolDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (CephStoragePoolDetails, error) { args := []string{"osd", "pool", "get", name, "all"} @@ -234,10 +258,19 @@ func DeletePool(context *clusterd.Context, clusterInfo *ClusterInfo, name string } func givePoolAppTag(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, appName string) error { + currentAppName, err := getPoolApplication(context, clusterInfo, poolName) + if err != nil { + return errors.Wrapf(err, "failed to get application for pool %q", poolName) + } + if currentAppName == appName { + logger.Infof("application %q is already set on pool %q", appName, poolName) + return nil + } + args := []string{"osd", "pool", "application", "enable", 
poolName, appName, confirmFlag} - _, err := NewCephCommand(context, clusterInfo, args).Run() + _, err = NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to enable application %s on pool %s", appName, poolName) + return errors.Wrapf(err, "failed to enable application %q on pool %q", appName, poolName) } return nil diff --git a/pkg/daemon/ceph/client/pool_test.go b/pkg/daemon/ceph/client/pool_test.go index 696e07970442..3c6123ea038f 100644 --- a/pkg/daemon/ceph/client/pool_test.go +++ b/pkg/daemon/ceph/client/pool_test.go @@ -29,6 +29,8 @@ import ( "github.com/stretchr/testify/assert" ) +const emptyApplicationName = `{"":{}}` + func TestCreateECPoolWithOverwrites(t *testing.T) { testCreateECPool(t, true, "") } @@ -79,6 +81,9 @@ func testCreateECPool(t *testing.T, overwrite bool, compressionMode string) { } } if args[2] == "application" { + if args[3] == "get" { + return emptyApplicationName, nil + } assert.Equal(t, "enable", args[3]) assert.Equal(t, "mypool", args[4]) assert.Equal(t, "myapp", args[5]) @@ -97,6 +102,53 @@ func testCreateECPool(t *testing.T, overwrite bool, compressionMode string) { } } +func TestSetPoolApplication(t *testing.T) { + poolName := "testpool" + appName := "testapp" + setAppName := false + blankAppName := false + clusterInfo := AdminTestClusterInfo("mycluster") + executor := &exectest.MockExecutor{} + context := &clusterd.Context{Executor: executor} + executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { + logger.Infof("Command: %s %v", command, args) + if args[1] == "pool" && args[2] == "application" { + if args[3] == "get" { + assert.Equal(t, poolName, args[4]) + if blankAppName { + return emptyApplicationName, nil + } else { + return fmt.Sprintf(`{"%s":{}}`, appName), nil + } + } + if args[3] == "enable" { + setAppName = true + assert.Equal(t, poolName, args[4]) + assert.Equal(t, appName, args[5]) + return "", nil + } + } + return "", 
errors.Errorf("unexpected ceph command %q", args) + } + + t.Run("set pool application", func(t *testing.T) { + setAppName = false + blankAppName = true + err := givePoolAppTag(context, clusterInfo, poolName, appName) + assert.NoError(t, err) + assert.True(t, setAppName) + }) + + t.Run("pool application already set", func(t *testing.T) { + setAppName = false + blankAppName = false + err := givePoolAppTag(context, clusterInfo, poolName, appName) + assert.NoError(t, err) + assert.False(t, setAppName) + }) + +} + func TestCreateReplicaPoolWithFailureDomain(t *testing.T) { testCreateReplicaPool(t, "osd", "mycrushroot", "", "") } @@ -137,6 +189,9 @@ func testCreateReplicaPool(t *testing.T, failureDomain, crushRoot, deviceClass, return "", nil } if args[2] == "application" { + if args[3] == "get" { + return emptyApplicationName, nil + } assert.Equal(t, "enable", args[3]) assert.Equal(t, "mypool", args[4]) assert.Equal(t, "myapp", args[5]) @@ -465,7 +520,11 @@ func testCreatePoolWithReplicasPerFailureDomain(t *testing.T, failureDomain, cru poolRuleSet = true return "", nil } - if len(args) >= 4 && args[1] == "pool" && args[2] == "application" && args[3] == "enable" { + if len(args) >= 4 && args[1] == "pool" && args[2] == "application" { + if args[3] == "get" { + return emptyApplicationName, nil + } + crushRuleName := args[4] assert.Equal(t, crushRuleName, poolSpec.Name) poolAppEnable = true diff --git a/pkg/daemon/ceph/client/upgrade.go b/pkg/daemon/ceph/client/upgrade.go index 17f8c5d57d7d..5d7534386f2b 100644 --- a/pkg/daemon/ceph/client/upgrade.go +++ b/pkg/daemon/ceph/client/upgrade.go @@ -46,7 +46,7 @@ func getCephMonVersionString(context *clusterd.Context, clusterInfo *ClusterInfo args := []string{"version"} buf, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return "", errors.Wrap(err, "failed to run 'ceph version'") + return "", errors.Wrapf(err, "failed to run 'ceph version'. 
%s", string(buf)) } output := string(buf) logger.Debug(output) diff --git a/pkg/operator/ceph/cluster/osd/envs.go b/pkg/operator/ceph/cluster/osd/envs.go index 11b714d88a65..7e3cf18315ed 100644 --- a/pkg/operator/ceph/cluster/osd/envs.go +++ b/pkg/operator/ceph/cluster/osd/envs.go @@ -32,6 +32,7 @@ const ( osdWalSizeEnvVarName = "ROOK_OSD_WAL_SIZE" osdsPerDeviceEnvVarName = "ROOK_OSDS_PER_DEVICE" osdDeviceClassEnvVarName = "ROOK_OSD_DEVICE_CLASS" + osdConfigMapOverrideName = "rook-ceph-osd-env-override" // EncryptedDeviceEnvVarName is used in the pod spec to indicate whether the OSD is encrypted or not EncryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE" PVCNameEnvVarName = "ROOK_PVC_NAME" @@ -213,6 +214,19 @@ func osdActivateEnvVar() []v1.EnvVar { return append(cephVolumeEnvVar(), monEnvVars...) } +func getEnvFromSources() []v1.EnvFromSource { + optionalConfigMapRef := true + + return []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{Name: osdConfigMapOverrideName}, + Optional: &optionalConfigMapRef, + }, + }, + } +} + func getTcmallocMaxTotalThreadCacheBytes(tcmallocMaxTotalThreadCacheBytes string) v1.EnvVar { var value string // If empty we read the default value from the file coming with the package diff --git a/pkg/operator/ceph/cluster/osd/provision_spec.go b/pkg/operator/ceph/cluster/osd/provision_spec.go index e9dbec33f25f..c4a7d32b72f6 100644 --- a/pkg/operator/ceph/cluster/osd/provision_spec.go +++ b/pkg/operator/ceph/cluster/osd/provision_spec.go @@ -302,6 +302,7 @@ func (c *Cluster) provisionOSDContainer(osdProps osdProperties, copyBinariesMoun Image: c.spec.CephVersion.Image, VolumeMounts: volumeMounts, Env: envVars, + EnvFrom: getEnvFromSources(), SecurityContext: &v1.SecurityContext{ Privileged: &privileged, RunAsUser: &runAsUser, diff --git a/pkg/operator/ceph/cluster/osd/spec.go b/pkg/operator/ceph/cluster/osd/spec.go index 2f57fa52a661..09e195e733d6 100644 --- 
a/pkg/operator/ceph/cluster/osd/spec.go +++ b/pkg/operator/ceph/cluster/osd/spec.go @@ -466,6 +466,7 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC Image: c.rookVersion, VolumeMounts: configVolumeMounts, Env: configEnvVars, + EnvFrom: getEnvFromSources(), SecurityContext: securityContext, }) } @@ -549,6 +550,7 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC Image: c.spec.CephVersion.Image, VolumeMounts: volumeMounts, Env: envVars, + EnvFrom: getEnvFromSources(), Resources: osdProps.resources, SecurityContext: securityContext, StartupProbe: controller.GenerateStartupProbeExecDaemon(opconfig.OsdType, osdID), @@ -767,6 +769,7 @@ func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string VolumeMounts: volMounts, SecurityContext: controller.PrivilegedContext(true), Env: envVars, + EnvFrom: getEnvFromSources(), Resources: osdProps.resources, } diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go index de394bef79aa..86c79da4c348 100644 --- a/pkg/operator/ceph/csi/spec.go +++ b/pkg/operator/ceph/csi/spec.go @@ -105,9 +105,9 @@ var ( DefaultRegistrarImage = "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" DefaultProvisionerImage = "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" DefaultAttacherImage = "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" - DefaultSnapshotterImage = "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" - DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" - DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.1.0" + DefaultSnapshotterImage = "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" + DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" + DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.3.0" DefaultCSIAddonsImage = "quay.io/csiaddons/k8s-sidecar:v0.2.1" // Local package template path for RBD diff --git 
a/pkg/operator/ceph/object/notification/provisioner.go b/pkg/operator/ceph/object/notification/provisioner.go index 5558085b4df4..0331dceae580 100644 --- a/pkg/operator/ceph/object/notification/provisioner.go +++ b/pkg/operator/ceph/object/notification/provisioner.go @@ -19,7 +19,6 @@ package notification import ( "context" - "net/http" "github.com/aws/aws-sdk-go/service/s3" "github.com/ceph/go-ceph/rgw/admin" @@ -44,26 +43,14 @@ type provisioner struct { objectStoreName types.NamespacedName } -func getUserCredentials(opManagerContext context.Context, username string, objStore *cephv1.CephObjectStore, objContext *object.Context) (accessKey string, secretKey string, err error) { +func getUserCredentials(adminOpsCtx *object.AdminOpsContext, opManagerContext context.Context, username string) (accessKey string, secretKey string, err error) { if len(username) == 0 { err = errors.New("no user name provided") return } - adminAccessKey, adminSecretKey, err := object.GetAdminOPSUserCredentials(objContext, &objStore.Spec) - if err != nil { - err = errors.Wrapf(err, "failed to get Ceph RGW admin ops user credentials when getting user %q", username) - return - } - - adminOpsClient, err := admin.New(objContext.Endpoint, adminAccessKey, adminSecretKey, &http.Client{}) - if err != nil { - err = errors.Wrapf(err, "failed to build admin ops API connection to get user %q", username) - return - } - var u admin.User - u, err = adminOpsClient.GetUser(opManagerContext, admin.User{ID: username}) + u, err = adminOpsCtx.AdminOpsClient.GetUser(opManagerContext, admin.User{ID: username}) if err != nil { err = errors.Wrapf(err, "failed to get ceph user %q", username) return @@ -88,12 +75,24 @@ func newS3Agent(p provisioner) (*object.S3Agent, error) { // CephClusterSpec is needed for GetAdminOPSUserCredentials() objContext.CephClusterSpec = *p.clusterSpec - accessKey, secretKey, err := getUserCredentials(p.opManagerContext, p.owner, objStore, objContext) + adminOpsCtx, err := 
object.NewMultisiteAdminOpsContext(objContext, &objStore.Spec) + if err != nil { + return nil, errors.Wrapf(err, "failed to get admin Ops context for CephObjectStore %q", p.objectStoreName) + + } + accessKey, secretKey, err := getUserCredentials(adminOpsCtx, p.opManagerContext, p.owner) if err != nil { return nil, errors.Wrapf(err, "failed to get owner credentials for %q", p.owner) } + tlsCert := make([]byte, 0) + if objStore.Spec.IsTLSEnabled() { + tlsCert, _, err = object.GetTlsCaCert(objContext, &objStore.Spec) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch TLS certificate for the object store") + } + } - return object.NewS3Agent(accessKey, secretKey, objContext.Endpoint, objContext.ZoneGroup, logger.LevelAt(capnslog.DEBUG), objContext.Context.KubeConfig.CertData) + return object.NewS3Agent(accessKey, secretKey, objContext.Endpoint, objContext.ZoneGroup, logger.LevelAt(capnslog.DEBUG), tlsCert) } // TODO: convert all rules without restrictions once the AWS SDK supports that diff --git a/pkg/operator/ceph/object/topic/provisioner.go b/pkg/operator/ceph/object/topic/provisioner.go index f94bf8fbf482..702a22383dff 100644 --- a/pkg/operator/ceph/object/topic/provisioner.go +++ b/pkg/operator/ceph/object/topic/provisioner.go @@ -86,7 +86,10 @@ func createSNSClient(p provisioner, objectStoreName types.NamespacedName) (*sns. } tlsEnabled := objStore.Spec.IsTLSEnabled() if tlsEnabled { - tlsCert := objContext.Context.KubeConfig.CertData + tlsCert, _, err := object.GetTlsCaCert(objContext, &objStore.Spec) + if err != nil { + return nil, errors.Wrap(err, "failed to get TLS certificate for the object store") + } if len(tlsCert) > 0 { client.Transport = object.BuildTransportTLS(tlsCert, false) } @@ -99,6 +102,7 @@ func createSNSClient(p provisioner, objectStoreName types.NamespacedName) (*sns. WithEndpoint(objContext.Endpoint). WithMaxRetries(3). WithDisableSSL(!tlsEnabled). + WithHTTPClient(&client). 
WithLogLevel(logLevel), ) if err != nil { diff --git a/pkg/operator/ceph/pool/controller_test.go b/pkg/operator/ceph/pool/controller_test.go index 21dbec2eca28..9ab58a50c158 100644 --- a/pkg/operator/ceph/pool/controller_test.go +++ b/pkg/operator/ceph/pool/controller_test.go @@ -56,6 +56,9 @@ func TestCreatePool(t *testing.T) { return `{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}`, nil } if args[0] == "osd" && args[1] == "pool" && args[2] == "application" { + if args[3] == "get" { + return ``, nil + } assert.Equal(t, "enable", args[3]) if args[5] != "rbd" { enabledMetricsApp = true diff --git a/pkg/operator/k8sutil/resources.go b/pkg/operator/k8sutil/resources.go index 800f87a6c720..c8a01005c796 100644 --- a/pkg/operator/k8sutil/resources.go +++ b/pkg/operator/k8sutil/resources.go @@ -148,37 +148,16 @@ func (info *OwnerInfo) GetUID() types.UID { } func MergeResourceRequirements(first, second v1.ResourceRequirements) v1.ResourceRequirements { - // if the first has a value not set check if second has and set it in first - if _, ok := first.Limits[v1.ResourceCPU]; !ok { - if _, ok = second.Limits[v1.ResourceCPU]; ok { - if first.Limits == nil { - first.Limits = v1.ResourceList{} - } - first.Limits[v1.ResourceCPU] = second.Limits[v1.ResourceCPU] + // if the first has no limits set, apply the second limits if any are specified + if len(first.Limits) == 0 { + if len(second.Limits) > 0 { + first.Limits = second.Limits } } - if _, ok := first.Limits[v1.ResourceMemory]; !ok { - if _, ok = second.Limits[v1.ResourceMemory]; ok { - if first.Limits == nil { - first.Limits = v1.ResourceList{} - } - first.Limits[v1.ResourceMemory] = second.Limits[v1.ResourceMemory] - } - } - if _, ok := first.Requests[v1.ResourceCPU]; !ok { - if _, ok = second.Requests[v1.ResourceCPU]; ok { - if first.Requests == nil { - first.Requests = v1.ResourceList{} - } - first.Requests[v1.ResourceCPU] = second.Requests[v1.ResourceCPU] - } - } - if _, ok := 
first.Requests[v1.ResourceMemory]; !ok { - if _, ok = second.Requests[v1.ResourceMemory]; ok { - if first.Requests == nil { - first.Requests = v1.ResourceList{} - } - first.Requests[v1.ResourceMemory] = second.Requests[v1.ResourceMemory] + // if the first has no requests set, apply the second requests if any are specified + if len(first.Requests) == 0 { + if len(second.Requests) > 0 { + first.Requests = second.Requests } } return first diff --git a/pkg/operator/k8sutil/resources_test.go b/pkg/operator/k8sutil/resources_test.go index fc47c4fed186..5eaba5fd0987 100644 --- a/pkg/operator/k8sutil/resources_test.go +++ b/pkg/operator/k8sutil/resources_test.go @@ -37,17 +37,18 @@ func TestMergeResourceRequirements(t *testing.T) { first = v1.ResourceRequirements{} second = v1.ResourceRequirements{ Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), + v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), + v1.ResourceStorage: *resource.NewQuantity(50.0, resource.BinarySI), }, Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), + v1.ResourceName("foo"): *resource.NewQuantity(23.0, resource.BinarySI), }, } result = MergeResourceRequirements(first, second) - assert.Equal(t, 1, len(result.Limits)) + assert.Equal(t, 2, len(result.Limits)) assert.Equal(t, 1, len(result.Requests)) assert.Equal(t, "100", result.Limits.Cpu().String()) - assert.Equal(t, "1337", result.Requests.Memory().String()) + assert.Equal(t, "50", result.Limits.Storage().String()) first = v1.ResourceRequirements{ Limits: v1.ResourceList{ diff --git a/tests/framework/clients/bucket.go b/tests/framework/clients/bucket.go index cfcc61ad4876..037bcdf19397 100644 --- a/tests/framework/clients/bucket.go +++ b/tests/framework/clients/bucket.go @@ -150,18 +150,22 @@ func (b *BucketOperation) CheckOBMaxObject(obcName, maxobject string) bool { } // Checks the bucket notifications set on RGW backend bucket -func (b 
*BucketOperation) CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint string) bool { +func (b *BucketOperation) CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName string, helper *TestClient, tlsEnabled bool) bool { var s3client *rgw.S3Agent - s3AccessKey, _ := b.GetAccessKey(obcName) - s3SecretKey, _ := b.GetSecretKey(obcName) - - //TODO : add TLS check - s3client, err := rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true, nil) + var err error + s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) + s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) + s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) + if tlsEnabled { + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true) + } else { + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true, nil) + } if err != nil { - logger.Errorf("S3 client creation failed with error %v", err) + logger.Infof("failed to create s3client due to %v", err) return false } - + logger.Infof("endpoint (%s) Accesskey (%s) secret (%s)", s3endpoint, s3AccessKey, s3SecretKey) notifications, err := s3client.Client.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ Bucket: &bucketname, }) diff --git a/tests/framework/utils/snapshot.go b/tests/framework/utils/snapshot.go index b333ec2f6cf8..c657305e0f9a 100644 --- a/tests/framework/utils/snapshot.go +++ b/tests/framework/utils/snapshot.go @@ -27,7 +27,7 @@ import ( const ( // snapshotterVersion from which the snapshotcontroller and CRD will be // installed - snapshotterVersion = "v4.0.0" + snapshotterVersion = "v5.0.1" repoURL = "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter" rbacPath = "deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml" controllerPath = "deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml" @@ -83,21 
+83,21 @@ func (k8sh *K8sHelper) snapshotController(action string) error { // WaitForSnapshotController check snapshotcontroller is ready within given // retries count. func (k8sh *K8sHelper) WaitForSnapshotController(retries int) error { - namespace := "default" + namespace := "kube-system" ctx := context.TODO() snapshotterName := "snapshot-controller" for i := 0; i < retries; i++ { - ss, err := k8sh.Clientset.AppsV1().StatefulSets(namespace).Get(ctx, snapshotterName, metav1.GetOptions{}) + ss, err := k8sh.Clientset.AppsV1().Deployments(namespace).Get(ctx, snapshotterName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } if ss.Status.ReadyReplicas > 0 && ss.Status.ReadyReplicas == ss.Status.Replicas { return nil } - logger.Infof("waiting for %q statufulset in namespace %q (readyreplicas %d < replicas %d)", snapshotterName, namespace, ss.Status.ReadyReplicas, ss.Status.Replicas) + logger.Infof("waiting for %q deployment in namespace %q (readyreplicas %d < replicas %d)", snapshotterName, namespace, ss.Status.ReadyReplicas, ss.Status.Replicas) time.Sleep(RetryInterval * time.Second) } - return fmt.Errorf("giving up waiting for %q statufulset in namespace %q", snapshotterName, namespace) + return fmt.Errorf("giving up waiting for %q deployment in namespace %q", snapshotterName, namespace) } // CreateSnapshotController creates the snapshotcontroller and required RBAC diff --git a/tests/integration/ceph_bucket_notification_test.go b/tests/integration/ceph_bucket_notification_test.go index 91b285b1ccc2..21297f7b23b4 100644 --- a/tests/integration/ceph_bucket_notification_test.go +++ b/tests/integration/ceph_bucket_notification_test.go @@ -23,26 +23,20 @@ import ( "github.com/rook/rook/pkg/daemon/ceph/client" rgw "github.com/rook/rook/pkg/operator/ceph/object" + "github.com/rook/rook/tests/framework/clients" "github.com/rook/rook/tests/framework/utils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (s *ObjectSuite) TestBucketNotifications() { +func testBucketNotifications(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { if utils.IsPlatformOpenShift() { s.T().Skip("bucket notification tests skipped on openshift") } - objectStoreServicePrefix = objectStoreServicePrefixUniq bucketNotificationLabelPrefix := "bucket-notification-" - storeName := "test-store-bucket-notification" - tlsEnable := false - namespace := s.settings.Namespace obcNamespace := "default" - helper := s.helper - k8sh := s.k8sh - logger.Infof("Running on Rook Cluster %s", namespace) - createCephObjectStore(s.T(), helper, k8sh, namespace, storeName, 3, tlsEnable) ctx := context.TODO() clusterInfo := client.AdminTestClusterInfo(namespace) @@ -57,7 +51,7 @@ func (s *ObjectSuite) TestBucketNotifications() { notificationName := "my-notification" topicName := "my-topic" httpEndpointService := "my-notification-sink" - s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) + logger.Infof("Testing Bucket Notifications on %s", storeName) t.Run("create CephBucketTopic", func(t *testing.T) { err := helper.TopicClient.CreateTopic(topicName, storeName, httpEndpointService) @@ -111,7 +105,7 @@ func (s *ObjectSuite) TestBucketNotifications() { t.Run("check CephBucketNotification created for bucket", func(t *testing.T) { notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) logger.Info("CephBucketNotification created successfully on bucket") @@ -127,7 +121,7 @@ func (s *ObjectSuite) TestBucketNotifications() { 
notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { // TODO : add api to fetch all the notification from backend to see if it is unaffected t.Skipped() - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) @@ -142,7 +136,7 @@ func (s *ObjectSuite) TestBucketNotifications() { notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { // TODO : add api to fetch all the notification from backend to see if it is unaffected t.Skipped() - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) @@ -158,7 +152,7 @@ func (s *ObjectSuite) TestBucketNotifications() { // check whether existing bucket notification uneffected var notificationPresent bool for i := 0; i < 4; i++ { - notificationPresent = helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + notificationPresent = helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) if !notificationPresent { break } @@ -196,7 +190,7 @@ func (s *ObjectSuite) TestBucketNotifications() { t.Run("new-notification should be configured for bucket", func(t *testing.T) { // check whether bucket notification added notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { 
- return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, newNotificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, newNotificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) @@ -269,7 +263,7 @@ func (s *ObjectSuite) TestBucketNotifications() { t.Run("notification should be configured after creating the topic", func(t *testing.T) { // check whether bucket notification added, should pass since topic got created notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, reverseOBCName, reverseBucketName, reverseNotificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, reverseOBCName, reverseBucketName, reverseNotificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go index 6cb02a5f5f83..44fec756e02b 100644 --- a/tests/integration/ceph_object_test.go +++ b/tests/integration/ceph_object_test.go @@ -157,6 +157,10 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite // now test operation of the first object store testObjectStoreOperations(s, helper, k8sh, namespace, storeName) + + bucketNotificationTestStoreName := "bucket-notification-" + storeName + createCephObjectStore(s.T(), helper, k8sh, namespace, bucketNotificationTestStoreName, 1, tlsEnable) + testBucketNotifications(s, helper, k8sh, namespace, bucketNotificationTestStoreName) } func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { @@ -164,6 +168,7 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * 
clusterInfo := client.AdminTestClusterInfo(namespace) t := s.T() + logger.Infof("Testing Object Operations on %s", storeName) t.Run("create CephObjectStoreUser", func(t *testing.T) { createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true, true) i := 0 @@ -362,7 +367,8 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.True(t, k8sh.CheckPodCountAndState("rook-ceph-mgr", namespace, 1, "Running")) }) - t.Run("CephObjectStore should delete now that dependents are gone", func(t *testing.T) { + // tests are complete, now delete the objectstore + s.T().Run("CephObjectStore should delete now that dependents are gone", func(t *testing.T) { // wait initially since it will almost never detect on the first try without this. time.Sleep(3 * time.Second) diff --git a/tests/scripts/helm.sh b/tests/scripts/helm.sh index 0c70a010e9bf..94acf558adf0 100755 --- a/tests/scripts/helm.sh +++ b/tests/scripts/helm.sh @@ -2,7 +2,7 @@ temp="/tmp/rook-tests-scripts-helm" -helm_version="${HELM_VERSION:-"v3.6.2"}" +helm_version="${HELM_VERSION:-"v3.8.0"}" arch="${ARCH:-}" detectArch() {