From 0a8140463d24b465ee1ef4b2923b0be568f4b348 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan
Date: Wed, 12 Jan 2022 17:11:04 +0530
Subject: [PATCH 01/13] rgw: inject tls certs for bucket notification and topic
 operations

The certs for accessing a TLS-enabled RGW are saved as secrets; the
controllers for notifications and topics inject them when a request is
sent to a TLS-enabled RGW endpoint.

Signed-off-by: Jiffin Tony Thottan
(cherry picked from commit a97747cece6c7f2e5acd1a66a81067356a407002)
---
 .../ceph/object/notification/provisioner.go   | 33 +++++++++----------
 pkg/operator/ceph/object/topic/provisioner.go |  6 +++-
 tests/framework/clients/bucket.go             | 20 ++++++-----
 .../ceph_bucket_notification_test.go          | 26 ++++++---------
 tests/integration/ceph_object_test.go         |  8 ++++-
 5 files changed, 50 insertions(+), 43 deletions(-)

diff --git a/pkg/operator/ceph/object/notification/provisioner.go b/pkg/operator/ceph/object/notification/provisioner.go
index 5558085b4df4..0331dceae580 100644
--- a/pkg/operator/ceph/object/notification/provisioner.go
+++ b/pkg/operator/ceph/object/notification/provisioner.go
@@ -19,7 +19,6 @@ package notification

 import (
 	"context"
-	"net/http"

 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/ceph/go-ceph/rgw/admin"
@@ -44,26 +43,14 @@ type provisioner struct {
 	objectStoreName types.NamespacedName
 }

-func getUserCredentials(opManagerContext context.Context, username string, objStore *cephv1.CephObjectStore, objContext *object.Context) (accessKey string, secretKey string, err error) {
+func getUserCredentials(adminOpsCtx *object.AdminOpsContext, opManagerContext context.Context, username string) (accessKey string, secretKey string, err error) {
 	if len(username) == 0 {
 		err = errors.New("no user name provided")
 		return
 	}

-	adminAccessKey, adminSecretKey, err := object.GetAdminOPSUserCredentials(objContext, &objStore.Spec)
-	if err != nil {
-		err = errors.Wrapf(err, "failed to get Ceph RGW admin ops user credentials when getting user %q", username)
-		return
-	}
-
-	adminOpsClient, err := admin.New(objContext.Endpoint, adminAccessKey, adminSecretKey, &http.Client{})
-	if err != nil {
-		err = errors.Wrapf(err, "failed to build admin ops API connection to get user %q", username)
-		return
-	}
-
 	var u admin.User
-	u, err = adminOpsClient.GetUser(opManagerContext, admin.User{ID: username})
+	u, err = adminOpsCtx.AdminOpsClient.GetUser(opManagerContext, admin.User{ID: username})
 	if err != nil {
 		err = errors.Wrapf(err, "failed to get ceph user %q", username)
 		return
 	}
@@ -88,12 +75,24 @@ func newS3Agent(p provisioner) (*object.S3Agent, error) {
 	// CephClusterSpec is needed for GetAdminOPSUserCredentials()
 	objContext.CephClusterSpec = *p.clusterSpec

-	accessKey, secretKey, err := getUserCredentials(p.opManagerContext, p.owner, objStore, objContext)
+	adminOpsCtx, err := object.NewMultisiteAdminOpsContext(objContext, &objStore.Spec)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get admin Ops context for CephObjectStore %q", p.objectStoreName)
+
+	}
+	accessKey, secretKey, err := getUserCredentials(adminOpsCtx, p.opManagerContext, p.owner)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get owner credentials for %q", p.owner)
 	}
+	tlsCert := make([]byte, 0)
+	if objStore.Spec.IsTLSEnabled() {
+		tlsCert, _, err = object.GetTlsCaCert(objContext, &objStore.Spec)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to fetch TLS certificate for the object store")
+		}
+	}

-	return object.NewS3Agent(accessKey, secretKey,
objContext.Endpoint, objContext.ZoneGroup, logger.LevelAt(capnslog.DEBUG), objContext.Context.KubeConfig.CertData) + return object.NewS3Agent(accessKey, secretKey, objContext.Endpoint, objContext.ZoneGroup, logger.LevelAt(capnslog.DEBUG), tlsCert) } // TODO: convert all rules without restrictions once the AWS SDK supports that diff --git a/pkg/operator/ceph/object/topic/provisioner.go b/pkg/operator/ceph/object/topic/provisioner.go index f94bf8fbf482..702a22383dff 100644 --- a/pkg/operator/ceph/object/topic/provisioner.go +++ b/pkg/operator/ceph/object/topic/provisioner.go @@ -86,7 +86,10 @@ func createSNSClient(p provisioner, objectStoreName types.NamespacedName) (*sns. } tlsEnabled := objStore.Spec.IsTLSEnabled() if tlsEnabled { - tlsCert := objContext.Context.KubeConfig.CertData + tlsCert, _, err := object.GetTlsCaCert(objContext, &objStore.Spec) + if err != nil { + return nil, errors.Wrap(err, "failed to get TLS certificate for the object store") + } if len(tlsCert) > 0 { client.Transport = object.BuildTransportTLS(tlsCert, false) } @@ -99,6 +102,7 @@ func createSNSClient(p provisioner, objectStoreName types.NamespacedName) (*sns. WithEndpoint(objContext.Endpoint). WithMaxRetries(3). WithDisableSSL(!tlsEnabled). + WithHTTPClient(&client). WithLogLevel(logLevel), ) if err != nil { diff --git a/tests/framework/clients/bucket.go b/tests/framework/clients/bucket.go index cfcc61ad4876..037bcdf19397 100644 --- a/tests/framework/clients/bucket.go +++ b/tests/framework/clients/bucket.go @@ -150,18 +150,22 @@ func (b *BucketOperation) CheckOBMaxObject(obcName, maxobject string) bool { } // Checks the bucket notifications set on RGW backend bucket -func (b *BucketOperation) CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint string) bool { +func (b *BucketOperation) CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName string, helper *TestClient, tlsEnabled bool) bool { var s3client *rgw.S3Agent - s3AccessKey, _ := b.GetAccessKey(obcName) - s3SecretKey, _ := b.GetSecretKey(obcName) - - //TODO : add TLS check - s3client, err := rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true, nil) + var err error + s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) + s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) + s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) + if tlsEnabled { + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true) + } else { + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true, nil) + } if err != nil { - logger.Errorf("S3 client creation failed with error %v", err) + logger.Infof("failed to s3client due to %v", err) return false } - + logger.Infof("endpoint (%s) Accesskey (%s) secret (%s)", s3endpoint, s3AccessKey, s3SecretKey) notifications, err := s3client.Client.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ Bucket: &bucketname, }) diff --git a/tests/integration/ceph_bucket_notification_test.go b/tests/integration/ceph_bucket_notification_test.go index 91b285b1ccc2..21297f7b23b4 100644 --- a/tests/integration/ceph_bucket_notification_test.go +++ b/tests/integration/ceph_bucket_notification_test.go @@ -23,26 +23,20 @@ import ( "github.com/rook/rook/pkg/daemon/ceph/client" rgw "github.com/rook/rook/pkg/operator/ceph/object" + "github.com/rook/rook/tests/framework/clients" "github.com/rook/rook/tests/framework/utils" 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (s *ObjectSuite) TestBucketNotifications() { +func testBucketNotifications(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { if utils.IsPlatformOpenShift() { s.T().Skip("bucket notification tests skipped on openshift") } - objectStoreServicePrefix = objectStoreServicePrefixUniq bucketNotificationLabelPrefix := "bucket-notification-" - storeName := "test-store-bucket-notification" - tlsEnable := false - namespace := s.settings.Namespace obcNamespace := "default" - helper := s.helper - k8sh := s.k8sh - logger.Infof("Running on Rook Cluster %s", namespace) - createCephObjectStore(s.T(), helper, k8sh, namespace, storeName, 3, tlsEnable) ctx := context.TODO() clusterInfo := client.AdminTestClusterInfo(namespace) @@ -57,7 +51,7 @@ func (s *ObjectSuite) TestBucketNotifications() { notificationName := "my-notification" topicName := "my-topic" httpEndpointService := "my-notification-sink" - s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) + logger.Infof("Testing Bucket Notifications on %s", storeName) t.Run("create CephBucketTopic", func(t *testing.T) { err := helper.TopicClient.CreateTopic(topicName, storeName, httpEndpointService) @@ -111,7 +105,7 @@ func (s *ObjectSuite) TestBucketNotifications() { t.Run("check CephBucketNotification created for bucket", func(t *testing.T) { notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) logger.Info("CephBucketNotification created successfully on bucket") @@ -127,7 +121,7 @@ func (s *ObjectSuite) TestBucketNotifications() { notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { // TODO : add api to fetch all the notification from backend to see if it is unaffected t.Skipped() - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) @@ -142,7 +136,7 @@ func (s *ObjectSuite) TestBucketNotifications() { notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { // TODO : add api to fetch all the notification from backend to see if it is unaffected t.Skipped() - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) @@ -158,7 +152,7 @@ func (s *ObjectSuite) TestBucketNotifications() { // check whether existing bucket notification uneffected var notificationPresent bool for i := 0; i < 4; i++ { - notificationPresent = helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, 
s3endpoint) + notificationPresent = helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, notificationName, helper, objectStore.Spec.IsTLSEnabled()) if !notificationPresent { break } @@ -196,7 +190,7 @@ func (s *ObjectSuite) TestBucketNotifications() { t.Run("new-notification should be configured for bucket", func(t *testing.T) { // check whether bucket notification added notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, newNotificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, obcName, bucketname, newNotificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) @@ -269,7 +263,7 @@ func (s *ObjectSuite) TestBucketNotifications() { t.Run("notification should be configured after creating the topic", func(t *testing.T) { // check whether bucket notification added, should pass since topic got created notificationPresent := utils.Retry(12, 2*time.Second, "notification is created for bucket", func() bool { - return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, reverseOBCName, reverseBucketName, reverseNotificationName, s3endpoint) + return helper.BucketClient.CheckBucketNotificationSetonRGW(namespace, storeName, reverseOBCName, reverseBucketName, reverseNotificationName, helper, objectStore.Spec.IsTLSEnabled()) }) assert.True(t, notificationPresent) }) diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go index 6cb02a5f5f83..44fec756e02b 100644 --- a/tests/integration/ceph_object_test.go +++ b/tests/integration/ceph_object_test.go @@ -157,6 +157,10 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite // now test operation of the first object store testObjectStoreOperations(s, helper, k8sh, namespace, storeName) + + bucketNotificationTestStoreName := "bucket-notification-" + storeName + createCephObjectStore(s.T(), helper, k8sh, namespace, bucketNotificationTestStoreName, 1, tlsEnable) + testBucketNotifications(s, helper, k8sh, namespace, bucketNotificationTestStoreName) } func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { @@ -164,6 +168,7 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * clusterInfo := client.AdminTestClusterInfo(namespace) t := s.T() + logger.Infof("Testing Object Operations on %s", storeName) t.Run("create CephObjectStoreUser", func(t *testing.T) { createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true, true) i := 0 @@ -362,7 +367,8 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.True(t, k8sh.CheckPodCountAndState("rook-ceph-mgr", namespace, 1, "Running")) }) - t.Run("CephObjectStore should delete now that dependents are gone", func(t *testing.T) { + // tests are complete, now delete the objectstore + s.T().Run("CephObjectStore should delete now that dependents are gone", func(t *testing.T) { // wait initially since it will almost never detect on the first try without this. 
time.Sleep(3 * time.Second) From beec916f002ee61a06d4e9e14b7d1ca0109b02f0 Mon Sep 17 00:00:00 2001 From: Travis Nielsen Date: Mon, 24 Jan 2022 14:11:28 -0700 Subject: [PATCH 02/13] helm: fix linter issue with end tag Recent versions of helm are failing the linter on the end tag if the tag is "{{- end -}}". Instead, the correct end tag is "{{- end }}". Signed-off-by: Travis Nielsen (cherry picked from commit b1a0918d0eae08db6bd0d5256f6fa514eaf55223) --- deploy/charts/library/templates/_cluster-psp.tpl | 2 +- deploy/charts/library/templates/_cluster-rolebinding.tpl | 2 +- deploy/charts/library/templates/_recommended-labels.tpl | 2 +- .../library/templates/_suffix-cluster-namespace.tpl | 4 ++-- deploy/charts/rook-ceph-cluster/templates/_helpers.tpl | 8 ++++---- deploy/charts/rook-ceph-cluster/templates/rbac.yaml | 4 ++-- deploy/charts/rook-ceph/templates/cluster-rbac.yaml | 2 +- deploy/charts/rook-ceph/templates/clusterrole.yaml | 2 +- deploy/charts/rook-ceph/templates/clusterrolebinding.yaml | 2 +- deploy/charts/rook-ceph/templates/psp.yaml | 2 +- deploy/charts/rook-ceph/templates/rolebinding.yaml | 4 ++-- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/deploy/charts/library/templates/_cluster-psp.tpl b/deploy/charts/library/templates/_cluster-psp.tpl index 1918f7774e86..2d735d1eed35 100644 --- a/deploy/charts/library/templates/_cluster-psp.tpl +++ b/deploy/charts/library/templates/_cluster-psp.tpl @@ -61,4 +61,4 @@ subjects: - kind: ServiceAccount name: rook-ceph-cmd-reporter namespace: {{ .Release.Namespace }} # namespace:cluster -{{- end -}} +{{- end }} diff --git a/deploy/charts/library/templates/_cluster-rolebinding.tpl b/deploy/charts/library/templates/_cluster-rolebinding.tpl index 4196165b2f7f..b9748d40120c 100644 --- a/deploy/charts/library/templates/_cluster-rolebinding.tpl +++ b/deploy/charts/library/templates/_cluster-rolebinding.tpl @@ -90,4 +90,4 @@ subjects: - kind: ServiceAccount name: rook-ceph-purge-osd namespace: {{ .Release.Namespace }} # namespace:cluster -{{- end -}} +{{- end }} diff --git a/deploy/charts/library/templates/_recommended-labels.tpl b/deploy/charts/library/templates/_recommended-labels.tpl index 906755c67d44..546e3b55c604 100644 --- a/deploy/charts/library/templates/_recommended-labels.tpl +++ b/deploy/charts/library/templates/_recommended-labels.tpl @@ -6,4 +6,4 @@ app.kubernetes.io/part-of: rook-ceph-operator app.kubernetes.io/managed-by: helm app.kubernetes.io/created-by: helm helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- end -}} +{{- end }} diff --git a/deploy/charts/library/templates/_suffix-cluster-namespace.tpl b/deploy/charts/library/templates/_suffix-cluster-namespace.tpl index 6957a910cde9..fdf679340d21 100644 --- a/deploy/charts/library/templates/_suffix-cluster-namespace.tpl +++ b/deploy/charts/library/templates/_suffix-cluster-namespace.tpl @@ -14,5 +14,5 @@ If the cluster namespace is different from the operator namespace, we want to na {{- $clusterNamespace := .Release.Namespace -}} {{- if ne $clusterNamespace $operatorNamespace -}} {{ printf "-%s" $clusterNamespace }} -{{- end -}} -{{- end -}} +{{- end }} +{{- end }} diff --git a/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl b/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl index 96b5fdbfd55a..a8a6fb5e7928 100644 --- a/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl +++ b/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl @@ -3,14 +3,14 @@ Define the clusterName as defaulting to the release namespace */}} {{- 
define "clusterName" -}} {{ .Values.clusterName | default .Release.Namespace }} -{{- end -}} +{{- end }} {{/* Return the target Kubernetes version. */}} {{- define "capabilities.kubeVersion" -}} {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} -{{- end -}} +{{- end }} {{/* Return the appropriate apiVersion for ingress. @@ -22,5 +22,5 @@ Return the appropriate apiVersion for ingress. {{- print "networking.k8s.io/v1beta1" -}} {{- else -}} {{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} +{{- end }} +{{- end }} diff --git a/deploy/charts/rook-ceph-cluster/templates/rbac.yaml b/deploy/charts/rook-ceph-cluster/templates/rbac.yaml index 7b04b373dece..1e3540572067 100644 --- a/deploy/charts/rook-ceph-cluster/templates/rbac.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/rbac.yaml @@ -14,7 +14,7 @@ clusterrolebindings {{- if .Values.pspEnable }} --- {{ include "library.cluster.psp.rolebindings" . }} -{{- end -}} +{{- end }} {{/* roles @@ -38,4 +38,4 @@ rolebindings {{ include "library.cluster.monitoring.rolebindings" . }} {{- end }} -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/cluster-rbac.yaml b/deploy/charts/rook-ceph/templates/cluster-rbac.yaml index 38a21a3512f2..06dcca401682 100644 --- a/deploy/charts/rook-ceph/templates/cluster-rbac.yaml +++ b/deploy/charts/rook-ceph/templates/cluster-rbac.yaml @@ -18,7 +18,7 @@ clusterrolebindings {{- if .Values.pspEnable }} --- {{ include "library.cluster.psp.rolebindings" . }} -{{- end -}} +{{- end }} {{/* roles diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index f76bbe8832a9..d2af9cbcb2b4 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -535,4 +535,4 @@ rules: - apiGroups: [""] resources: ["serviceaccounts"] verbs: ["get"] -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml index 7663797953a8..c99ec442cc8e 100644 --- a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml @@ -99,4 +99,4 @@ roleRef: kind: ClusterRole name: rbd-external-provisioner-runner apiGroup: rbac.authorization.k8s.io -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/psp.yaml b/deploy/charts/rook-ceph/templates/psp.yaml index 23c5a415ba07..920299873c62 100644 --- a/deploy/charts/rook-ceph/templates/psp.yaml +++ b/deploy/charts/rook-ceph/templates/psp.yaml @@ -168,4 +168,4 @@ subjects: name: rook-csi-rbd-provisioner-sa namespace: {{ .Release.Namespace }} # namespace:operator {{- end }} -{{- end -}} +{{- end }} diff --git a/deploy/charts/rook-ceph/templates/rolebinding.yaml b/deploy/charts/rook-ceph/templates/rolebinding.yaml index 5be43f9caa09..5e5c4e6cebd6 100644 --- a/deploy/charts/rook-ceph/templates/rolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/rolebinding.yaml @@ -47,7 +47,7 @@ roleRef: name: rbd-csi-nodeplugin apiGroup: rbac.authorization.k8s.io --- -{{- end -}} +{{- end }} kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -61,4 +61,4 @@ roleRef: kind: Role name: rbd-external-provisioner-cfg apiGroup: rbac.authorization.k8s.io -{{- end -}} +{{- end }} From e1fd738dd8f1c450577cabb51b59abfae8e5c122 Mon Sep 17 00:00:00 2001 From: Travis Nielsen Date: Mon, 24 Jan 2022 14:12:22 -0700 Subject: [PATCH 03/13] helm: update to the latest helm version v3.8 
The CI was building with helm 3.6.2, now updating to the latest v3.8.0 Signed-off-by: Travis Nielsen (cherry picked from commit 50afb9f026b7d79e8500c12d04f550612c22cf11) --- tests/scripts/helm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/scripts/helm.sh b/tests/scripts/helm.sh index 0c70a010e9bf..94acf558adf0 100755 --- a/tests/scripts/helm.sh +++ b/tests/scripts/helm.sh @@ -2,7 +2,7 @@ temp="/tmp/rook-tests-scripts-helm" -helm_version="${HELM_VERSION:-"v3.6.2"}" +helm_version="${HELM_VERSION:-"v3.8.0"}" arch="${ARCH:-}" detectArch() { From f35da8d9c47a98752653daa6ced0587a801e0bb3 Mon Sep 17 00:00:00 2001 From: Travis Nielsen Date: Tue, 25 Jan 2022 15:05:18 -0700 Subject: [PATCH 04/13] pool: check for application already being set If the pool application is already set, skip setting it again to avoid a warning message being logged that it is already set. Signed-off-by: Travis Nielsen (cherry picked from commit e0cfb4fce6e4c0420ae50de03398a8b62fa8c2a7) --- pkg/daemon/ceph/client/pool.go | 37 +++++++++++++- pkg/daemon/ceph/client/pool_test.go | 61 ++++++++++++++++++++++- pkg/operator/ceph/pool/controller_test.go | 3 ++ 3 files changed, 98 insertions(+), 3 deletions(-) diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go index c4d024521cf1..345cebb1de59 100644 --- a/pkg/daemon/ceph/client/pool.go +++ b/pkg/daemon/ceph/client/pool.go @@ -116,6 +116,30 @@ func GetPoolNamesByID(context *clusterd.Context, clusterInfo *ClusterInfo) (map[ return names, nil } +func getPoolApplication(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string) (string, error) { + args := []string{"osd", "pool", "application", "get", poolName} + appDetails, err := NewCephCommand(context, clusterInfo, args).Run() + if err != nil { + return "", errors.Wrapf(err, "failed to get current application for pool %s", poolName) + } + + if len(appDetails) == 0 { + // no application name + return "", nil + } + var application map[string]interface{} + err = json.Unmarshal([]byte(appDetails), &application) + if err != nil { + return "", errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(appDetails)) + } + for name := range application { + // Return the first application name in the list since only one is expected + return name, nil + } + // No application name assigned + return "", nil +} + // GetPoolDetails gets all the details of a given pool func GetPoolDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name string) (CephStoragePoolDetails, error) { args := []string{"osd", "pool", "get", name, "all"} @@ -234,10 +258,19 @@ func DeletePool(context *clusterd.Context, clusterInfo *ClusterInfo, name string } func givePoolAppTag(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, appName string) error { + currentAppName, err := getPoolApplication(context, clusterInfo, poolName) + if err != nil { + return errors.Wrapf(err, "failed to get application for pool %q", poolName) + } + if currentAppName == appName { + logger.Infof("application %q is already set on pool %q", appName, poolName) + return nil + } + args := []string{"osd", "pool", "application", "enable", poolName, appName, confirmFlag} - _, err := NewCephCommand(context, clusterInfo, args).Run() + _, err = NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to enable application %s on pool %s", appName, poolName) + return errors.Wrapf(err, "failed to enable application %q on pool %q", appName, poolName) } return nil diff --git 
a/pkg/daemon/ceph/client/pool_test.go b/pkg/daemon/ceph/client/pool_test.go index 696e07970442..3c6123ea038f 100644 --- a/pkg/daemon/ceph/client/pool_test.go +++ b/pkg/daemon/ceph/client/pool_test.go @@ -29,6 +29,8 @@ import ( "github.com/stretchr/testify/assert" ) +const emptyApplicationName = `{"":{}}` + func TestCreateECPoolWithOverwrites(t *testing.T) { testCreateECPool(t, true, "") } @@ -79,6 +81,9 @@ func testCreateECPool(t *testing.T, overwrite bool, compressionMode string) { } } if args[2] == "application" { + if args[3] == "get" { + return emptyApplicationName, nil + } assert.Equal(t, "enable", args[3]) assert.Equal(t, "mypool", args[4]) assert.Equal(t, "myapp", args[5]) @@ -97,6 +102,53 @@ func testCreateECPool(t *testing.T, overwrite bool, compressionMode string) { } } +func TestSetPoolApplication(t *testing.T) { + poolName := "testpool" + appName := "testapp" + setAppName := false + blankAppName := false + clusterInfo := AdminTestClusterInfo("mycluster") + executor := &exectest.MockExecutor{} + context := &clusterd.Context{Executor: executor} + executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { + logger.Infof("Command: %s %v", command, args) + if args[1] == "pool" && args[2] == "application" { + if args[3] == "get" { + assert.Equal(t, poolName, args[4]) + if blankAppName { + return emptyApplicationName, nil + } else { + return fmt.Sprintf(`{"%s":{}}`, appName), nil + } + } + if args[3] == "enable" { + setAppName = true + assert.Equal(t, poolName, args[4]) + assert.Equal(t, appName, args[5]) + return "", nil + } + } + return "", errors.Errorf("unexpected ceph command %q", args) + } + + t.Run("set pool application", func(t *testing.T) { + setAppName = false + blankAppName = true + err := givePoolAppTag(context, clusterInfo, poolName, appName) + assert.NoError(t, err) + assert.True(t, setAppName) + }) + + t.Run("pool application already set", func(t *testing.T) { + setAppName = false + blankAppName = false + err := givePoolAppTag(context, clusterInfo, poolName, appName) + assert.NoError(t, err) + assert.False(t, setAppName) + }) + +} + func TestCreateReplicaPoolWithFailureDomain(t *testing.T) { testCreateReplicaPool(t, "osd", "mycrushroot", "", "") } @@ -137,6 +189,9 @@ func testCreateReplicaPool(t *testing.T, failureDomain, crushRoot, deviceClass, return "", nil } if args[2] == "application" { + if args[3] == "get" { + return emptyApplicationName, nil + } assert.Equal(t, "enable", args[3]) assert.Equal(t, "mypool", args[4]) assert.Equal(t, "myapp", args[5]) @@ -465,7 +520,11 @@ func testCreatePoolWithReplicasPerFailureDomain(t *testing.T, failureDomain, cru poolRuleSet = true return "", nil } - if len(args) >= 4 && args[1] == "pool" && args[2] == "application" && args[3] == "enable" { + if len(args) >= 4 && args[1] == "pool" && args[2] == "application" { + if args[3] == "get" { + return emptyApplicationName, nil + } + crushRuleName := args[4] assert.Equal(t, crushRuleName, poolSpec.Name) poolAppEnable = true diff --git a/pkg/operator/ceph/pool/controller_test.go b/pkg/operator/ceph/pool/controller_test.go index 21dbec2eca28..9ab58a50c158 100644 --- a/pkg/operator/ceph/pool/controller_test.go +++ b/pkg/operator/ceph/pool/controller_test.go @@ -56,6 +56,9 @@ func TestCreatePool(t *testing.T) { return `{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}`, nil } if args[0] == "osd" && args[1] == "pool" && args[2] == "application" { + if args[3] == "get" { + return ``, nil + } assert.Equal(t, "enable", args[3]) if args[5] != 
"rbd" { enabledMetricsApp = true From d6fd12e99988570d8f3d833f28f3e0d5ac36399f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 26 Jan 2022 15:16:22 +0100 Subject: [PATCH 05/13] core: print the stdout on errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we fail fetching the ceph version let's also print the stdout since ceph tends to put the error here too. Signed-off-by: Sébastien Han (cherry picked from commit e31e4b49b013b2c1e2e32b985d66eb5853595981) --- pkg/daemon/ceph/client/upgrade.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/daemon/ceph/client/upgrade.go b/pkg/daemon/ceph/client/upgrade.go index 17f8c5d57d7d..5d7534386f2b 100644 --- a/pkg/daemon/ceph/client/upgrade.go +++ b/pkg/daemon/ceph/client/upgrade.go @@ -46,7 +46,7 @@ func getCephMonVersionString(context *clusterd.Context, clusterInfo *ClusterInfo args := []string{"version"} buf, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return "", errors.Wrap(err, "failed to run 'ceph version'") + return "", errors.Wrapf(err, "failed to run 'ceph version'. %s", string(buf)) } output := string(buf) logger.Debug(output) From 50d240234e7c894f16a65f9f693dfd27a84d0ab3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 26 Jan 2022 15:17:17 +0100 Subject: [PATCH 06/13] core: add a break line before printing the ceph config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's easier to read the config if there is a new line before instead having a truncated version like: ``` 2022-01-26 06:39:41.421373 D | cephclient: config file @ /etc/ceph/ceph.conf: [global] fsid = 0650a1d5-d688-4575-97fc-7b3052a8a3dd mon initial members = a mon host = [v2:10.104.192.209:3300,v1:10.104.192.209:6789] [client.admin] keyring = /var/lib/rook/rook-ceph/client.admin.keyring ``` Now we have: ``` 2022-01-26 06:39:41.421373 D | cephclient: config file @ /etc/ceph/ceph.conf: [global] fsid = 0650a1d5-d688-4575-97fc-7b3052a8a3dd mon initial members = a mon host = [v2:10.104.192.209:3300,v1:10.104.192.209:6789] [client.admin] keyring = /var/lib/rook/rook-ceph/client.admin.keyring ``` Signed-off-by: Sébastien Han (cherry picked from commit 58c175de4769d73209109df8598d66091cc6a4c0) --- pkg/daemon/ceph/client/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/daemon/ceph/client/config.go b/pkg/daemon/ceph/client/config.go index da2f41494131..1ace5a2f25a8 100644 --- a/pkg/daemon/ceph/client/config.go +++ b/pkg/daemon/ceph/client/config.go @@ -292,7 +292,7 @@ func WriteCephConfig(context *clusterd.Context, clusterInfo *ClusterInfo) error } dst, err := ioutil.ReadFile(DefaultConfigFilePath()) if err == nil { - logger.Debugf("config file @ %s: %s", DefaultConfigFilePath(), dst) + logger.Debugf("config file @ %s:\n%s", DefaultConfigFilePath(), dst) } else { logger.Warningf("wrote and copied config file but failed to read it back from %s for logging. %v", DefaultConfigFilePath(), err) } From 98e9fe0fbcc78ba3cb523bc338c4e28ea02fb20b Mon Sep 17 00:00:00 2001 From: Radoslaw Zarzynski Date: Mon, 3 Jan 2022 15:39:49 +0000 Subject: [PATCH 07/13] osd: allow for injecting extra env. variables via ConfigMap This patch brings a mechanism to define arbitrary environment variables in OSD containers (both prepare and main ones). 
It is based on an idea proposed by Sebastien Han to use an optional
`ConfigMap` instance named `rook-ceph-osd-env-override` as the source
of these settings.

The need for the patch comes from the fact that, although crimson
finally exposes the same CLI interface as the classical OSD, the
broadly used development builds have ASan built in. As ASan, by
default, complains if it isn't the very first loaded DSO, we need a way
to set the `ASAN_OPTIONS` environment variable to
`verify_asan_link_order=0` to mitigate the early aborts.

Signed-off-by: Radoslaw Zarzynski
(cherry picked from commit 17ca41f557867714d747df4c70834798cded590d)
---
 deploy/examples/osd-env-override.yaml         | 19 +++++++++++++++++++
 pkg/operator/ceph/cluster/osd/envs.go         | 14 ++++++++++++++
 .../ceph/cluster/osd/provision_spec.go        |  1 +
 pkg/operator/ceph/cluster/osd/spec.go         |  3 +++
 4 files changed, 37 insertions(+)
 create mode 100644 deploy/examples/osd-env-override.yaml

diff --git a/deploy/examples/osd-env-override.yaml b/deploy/examples/osd-env-override.yaml
new file mode 100644
index 000000000000..454ccc50a5ed
--- /dev/null
+++ b/deploy/examples/osd-env-override.yaml
@@ -0,0 +1,19 @@
+# ###############################################################################################################
+# The `rook-ceph-osd-env-override` ConfigMap is a development feature
+# that allows injecting arbitrary environment variables into OSD-related
+# containers created by the operator.
+# ###############################################################################################################
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rook-ceph-osd-env-override
+  namespace: rook-ceph
+data:
+  # Bypass the ASan's assertion that it is the very first loaded DSO.
+  # This is necessary for crimson-osd as it's currently built with
+  # the ASan sanitizer turned on which means the `libasan.so` must
+  # be the very first loaded dynamic library. Unfortunately, this
+  # isn't fulfilled as the containers use `ld.preload`, so ASan was
+  # aborting the entire OSD.
+  ASAN_OPTIONS: verify_asan_link_order=0

diff --git a/pkg/operator/ceph/cluster/osd/envs.go b/pkg/operator/ceph/cluster/osd/envs.go
index 11b714d88a65..7e3cf18315ed 100644
--- a/pkg/operator/ceph/cluster/osd/envs.go
+++ b/pkg/operator/ceph/cluster/osd/envs.go
@@ -32,6 +32,7 @@ const (
 	osdWalSizeEnvVarName     = "ROOK_OSD_WAL_SIZE"
 	osdsPerDeviceEnvVarName  = "ROOK_OSDS_PER_DEVICE"
 	osdDeviceClassEnvVarName = "ROOK_OSD_DEVICE_CLASS"
+	osdConfigMapOverrideName = "rook-ceph-osd-env-override"
 	// EncryptedDeviceEnvVarName is used in the pod spec to indicate whether the OSD is encrypted or not
 	EncryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE"
 	PVCNameEnvVarName         = "ROOK_PVC_NAME"
@@ -213,6 +214,19 @@ func osdActivateEnvVar() []v1.EnvVar {
 	return append(cephVolumeEnvVar(), monEnvVars...)
} +func getEnvFromSources() []v1.EnvFromSource { + optionalConfigMapRef := true + + return []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{Name: osdConfigMapOverrideName}, + Optional: &optionalConfigMapRef, + }, + }, + } +} + func getTcmallocMaxTotalThreadCacheBytes(tcmallocMaxTotalThreadCacheBytes string) v1.EnvVar { var value string // If empty we read the default value from the file coming with the package diff --git a/pkg/operator/ceph/cluster/osd/provision_spec.go b/pkg/operator/ceph/cluster/osd/provision_spec.go index e9dbec33f25f..c4a7d32b72f6 100644 --- a/pkg/operator/ceph/cluster/osd/provision_spec.go +++ b/pkg/operator/ceph/cluster/osd/provision_spec.go @@ -302,6 +302,7 @@ func (c *Cluster) provisionOSDContainer(osdProps osdProperties, copyBinariesMoun Image: c.spec.CephVersion.Image, VolumeMounts: volumeMounts, Env: envVars, + EnvFrom: getEnvFromSources(), SecurityContext: &v1.SecurityContext{ Privileged: &privileged, RunAsUser: &runAsUser, diff --git a/pkg/operator/ceph/cluster/osd/spec.go b/pkg/operator/ceph/cluster/osd/spec.go index 2f57fa52a661..09e195e733d6 100644 --- a/pkg/operator/ceph/cluster/osd/spec.go +++ b/pkg/operator/ceph/cluster/osd/spec.go @@ -466,6 +466,7 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC Image: c.rookVersion, VolumeMounts: configVolumeMounts, Env: configEnvVars, + EnvFrom: getEnvFromSources(), SecurityContext: securityContext, }) } @@ -549,6 +550,7 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC Image: c.spec.CephVersion.Image, VolumeMounts: volumeMounts, Env: envVars, + EnvFrom: getEnvFromSources(), Resources: osdProps.resources, SecurityContext: securityContext, StartupProbe: controller.GenerateStartupProbeExecDaemon(opconfig.OsdType, osdID), @@ -767,6 +769,7 @@ func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string VolumeMounts: volMounts, SecurityContext: controller.PrivilegedContext(true), Env: envVars, + EnvFrom: getEnvFromSources(), Resources: osdProps.resources, } From 83f627a2e3465dc487bec92d04021d82a3567e07 Mon Sep 17 00:00:00 2001 From: Travis Nielsen Date: Wed, 26 Jan 2022 10:46:43 -0700 Subject: [PATCH 08/13] osd: merge all osd resources including custom The merging of OSD resource limits and requests assumed that only the cpu and memory resources needed to be merged. It is also possible to set custom resource properties such as intel.com/sriov_net_in: '1' for use with multus, so the merging needs to be more general. 
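As a rough sketch of the intended behavior (the program below is
illustrative only and not part of the patch; the custom resource name
mirrors the example above):

```
package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/k8sutil"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// OSD-specific settings: nothing set by the user.
	first := v1.ResourceRequirements{}
	// Cluster defaults that include a custom, non-cpu/memory resource.
	second := v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceName("intel.com/sriov_net_in"): resource.MustParse("1"),
		},
	}
	// With the generalized merge, the entire Limits list (custom
	// resource included) is applied rather than only cpu/memory.
	merged := k8sutil.MergeResourceRequirements(first, second)
	fmt.Println(merged.Limits)
}
```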
Signed-off-by: Travis Nielsen (cherry picked from commit f9ac96604806e0796de40b49e343a086dc0fc976) --- pkg/operator/k8sutil/resources.go | 37 ++++++-------------------- pkg/operator/k8sutil/resources_test.go | 9 ++++--- 2 files changed, 13 insertions(+), 33 deletions(-) diff --git a/pkg/operator/k8sutil/resources.go b/pkg/operator/k8sutil/resources.go index 800f87a6c720..c8a01005c796 100644 --- a/pkg/operator/k8sutil/resources.go +++ b/pkg/operator/k8sutil/resources.go @@ -148,37 +148,16 @@ func (info *OwnerInfo) GetUID() types.UID { } func MergeResourceRequirements(first, second v1.ResourceRequirements) v1.ResourceRequirements { - // if the first has a value not set check if second has and set it in first - if _, ok := first.Limits[v1.ResourceCPU]; !ok { - if _, ok = second.Limits[v1.ResourceCPU]; ok { - if first.Limits == nil { - first.Limits = v1.ResourceList{} - } - first.Limits[v1.ResourceCPU] = second.Limits[v1.ResourceCPU] + // if the first has no limits set, apply the second limits if any are specified + if len(first.Limits) == 0 { + if len(second.Limits) > 0 { + first.Limits = second.Limits } } - if _, ok := first.Limits[v1.ResourceMemory]; !ok { - if _, ok = second.Limits[v1.ResourceMemory]; ok { - if first.Limits == nil { - first.Limits = v1.ResourceList{} - } - first.Limits[v1.ResourceMemory] = second.Limits[v1.ResourceMemory] - } - } - if _, ok := first.Requests[v1.ResourceCPU]; !ok { - if _, ok = second.Requests[v1.ResourceCPU]; ok { - if first.Requests == nil { - first.Requests = v1.ResourceList{} - } - first.Requests[v1.ResourceCPU] = second.Requests[v1.ResourceCPU] - } - } - if _, ok := first.Requests[v1.ResourceMemory]; !ok { - if _, ok = second.Requests[v1.ResourceMemory]; ok { - if first.Requests == nil { - first.Requests = v1.ResourceList{} - } - first.Requests[v1.ResourceMemory] = second.Requests[v1.ResourceMemory] + // if the first has no requests set, apply the second requests if any are specified + if len(first.Requests) == 0 { + if len(second.Requests) > 0 { + first.Requests = second.Requests } } return first diff --git a/pkg/operator/k8sutil/resources_test.go b/pkg/operator/k8sutil/resources_test.go index fc47c4fed186..5eaba5fd0987 100644 --- a/pkg/operator/k8sutil/resources_test.go +++ b/pkg/operator/k8sutil/resources_test.go @@ -37,17 +37,18 @@ func TestMergeResourceRequirements(t *testing.T) { first = v1.ResourceRequirements{} second = v1.ResourceRequirements{ Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), + v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI), + v1.ResourceStorage: *resource.NewQuantity(50.0, resource.BinarySI), }, Requests: v1.ResourceList{ - v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI), + v1.ResourceName("foo"): *resource.NewQuantity(23.0, resource.BinarySI), }, } result = MergeResourceRequirements(first, second) - assert.Equal(t, 1, len(result.Limits)) + assert.Equal(t, 2, len(result.Limits)) assert.Equal(t, 1, len(result.Requests)) assert.Equal(t, "100", result.Limits.Cpu().String()) - assert.Equal(t, "1337", result.Requests.Memory().String()) + assert.Equal(t, "50", result.Limits.Storage().String()) first = v1.ResourceRequirements{ Limits: v1.ResourceList{ From 2cd55d7bd64845a6571bff57a26a21e2d00f71e4 Mon Sep 17 00:00:00 2001 From: Mathieu Parent Date: Wed, 26 Jan 2022 07:20:09 +0100 Subject: [PATCH 09/13] csi: bump csi resizer and volume replication images Upgraded images: - k8s.gcr.io/sig-storage/csi-resizer: v1.3.0 -> v1.4.0 - 
quay.io/csiaddons/volumereplication-operator: v0.1.0 -> v0.3.0 Detailed changelogs: - https://github.com/kubernetes-csi/external-resizer/blob/release-1.4/CHANGELOG/CHANGELOG-1.4.md - https://github.com/csi-addons/volume-replication-operator/tree/v0.2.0 - https://github.com/csi-addons/volume-replication-operator/tree/v0.3.0 Signed-off-by: Mathieu Parent (cherry picked from commit 88a449d09c459962f4102f2a0fda3753924fc806) --- Documentation/ceph-upgrade.md | 8 ++++---- Documentation/helm-operator.md | 4 ++-- deploy/charts/rook-ceph/values.yaml | 4 ++-- deploy/examples/images.txt | 4 ++-- deploy/examples/operator-openshift.yaml | 4 ++-- deploy/examples/operator.yaml | 4 ++-- pkg/operator/ceph/csi/spec.go | 4 ++-- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md index 2849a555e0f8..c33ddf82a07b 100644 --- a/Documentation/ceph-upgrade.md +++ b/Documentation/ceph-upgrade.md @@ -549,9 +549,9 @@ ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1" ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" -ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" +ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" -CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" +CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" ``` @@ -574,9 +574,9 @@ kubectl --namespace rook-ceph get pod -o jsonpath='{range .items[*]}{range .spec k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 -k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 +k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 quay.io/cephcsi/cephcsi:v3.5.1 -quay.io/csiaddons/volumereplication-operator:v0.1.0 +quay.io/csiaddons/volumereplication-operator:v0.3.0 quay.io/csiaddons/k8s-sidecar:v0.2.1 ``` diff --git a/Documentation/helm-operator.md b/Documentation/helm-operator.md index 0fcab3438c69..8921efdbf89c 100644 --- a/Documentation/helm-operator.md +++ b/Documentation/helm-operator.md @@ -138,14 +138,14 @@ The following tables lists the configurable parameters of the rook-operator char | `csi.rbdPluginUpdateStrategy` | CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | | `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | | `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0` | -| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.3.0` | +| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.4.0` | | `csi.provisioner.image` | Kubernetes CSI provisioner image. | `k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0` | | `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0` | | `csi.attacher.image` | Kubernetes CSI Attacher image. 
| `k8s.gcr.io/sig-storage/csi-attacher:v3.4.0` | | `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Pods. | | | `csi.rbdPodLabels` | Labels to add to the CSI RBD Pods. | | | `csi.volumeReplication.enabled` | Enable Volume Replication. | `false` | -| `csi.volumeReplication.image` | Volume Replication Controller image. | `quay.io/csiaddons/volumereplication-operator:v0.1.0` | +| `csi.volumeReplication.image` | Volume Replication Controller image. | `quay.io/csiaddons/volumereplication-operator:v0.3.0` | | `csi.csiAddons.enabled` | Enable CSIAddons | `false` | | `csi.csiAddons.image` | CSIAddons Sidecar image. | `quay.io/csiaddons/k8s-sidecar:v0.2.1` | | `admissionController.tolerations` | Array of tolerations in YAML format which will be added to admission controller deployment. | | diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml index 1213e5cf8da0..338fc65c4ba7 100644 --- a/deploy/charts/rook-ceph/values.yaml +++ b/deploy/charts/rook-ceph/values.yaml @@ -286,7 +286,7 @@ csi: #attacher: #image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 #resizer: - #image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + #image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 # Labels to add to the CSI CephFS Deployments and DaemonSets Pods. #cephfsPodLabels: "key1=value1,key2=value2" # Labels to add to the CSI RBD Deployments and DaemonSets Pods. @@ -296,7 +296,7 @@ csi: # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring volumeReplication: enabled: false - #image: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + #image: "quay.io/csiaddons/volumereplication-operator:v0.3.0" # Enable the CSIAddons sidecar. csiAddons: enabled: false diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt index 1d566afa05f7..09ea3dd89407 100644 --- a/deploy/examples/images.txt +++ b/deploy/examples/images.txt @@ -1,10 +1,10 @@ k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 - k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 quay.io/ceph/ceph:v16.2.7 quay.io/cephcsi/cephcsi:v3.5.1 quay.io/csiaddons/k8s-sidecar:v0.2.1 - quay.io/csiaddons/volumereplication-operator:v0.1.0 + quay.io/csiaddons/volumereplication-operator:v0.3.0 rook/ceph:v1.8.3 diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index 5f715576902f..efc3d58ae7fe 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -163,7 +163,7 @@ data: # these images to the desired release of the CSI driver. # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1" # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" @@ -417,7 +417,7 @@ data: CSI_ENABLE_VOLUME_REPLICATION: "false" # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. 
ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" - # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" # Enable the csi addons sidecar. CSI_ENABLE_CSIADDONS: "false" # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index c21aef1c50dc..b127e6e98396 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -81,7 +81,7 @@ data: # these images to the desired release of the CSI driver. # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1" # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" @@ -335,7 +335,7 @@ data: # Before enabling, ensure the Volume Replication CRDs are created. # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring CSI_ENABLE_VOLUME_REPLICATION: "false" - # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" # Enable the csi addons sidecar. CSI_ENABLE_CSIADDONS: "false" # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go index de394bef79aa..4f545e9927d8 100644 --- a/pkg/operator/ceph/csi/spec.go +++ b/pkg/operator/ceph/csi/spec.go @@ -106,8 +106,8 @@ var ( DefaultProvisionerImage = "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" DefaultAttacherImage = "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" DefaultSnapshotterImage = "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" - DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" - DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.1.0" + DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" + DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.3.0" DefaultCSIAddonsImage = "quay.io/csiaddons/k8s-sidecar:v0.2.1" // Local package template path for RBD From 1d7fea8f522a13412c690739c908ff7aec2af7c6 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 27 Jan 2022 21:17:51 +0530 Subject: [PATCH 10/13] csi: bump csi snapshotter image to v5 updating the csi-snapshotter and dependencies to v5.0.1 released version. 
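One way to double-check which CSI images are actually running after the
bump (an illustrative command, not part of the patch, in the spirit of
the jsonpath check in the upgrade guide):

```
kubectl --namespace rook-ceph get pod -o jsonpath='{..image}' | tr ' ' '\n' | sort -u
```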
Co-authored-by: Mathieu Parent Signed-off-by: Madhu Rajanna (cherry picked from commit cf46615688a76dd9b82b27dfabe8bf0ab4107761) --- Documentation/ceph-upgrade.md | 4 ++-- Documentation/helm-operator.md | 2 +- deploy/charts/rook-ceph/templates/clusterrole.yaml | 12 ++++++------ deploy/charts/rook-ceph/values.yaml | 2 +- deploy/examples/common.yaml | 12 ++++++------ deploy/examples/images.txt | 2 +- deploy/examples/operator-openshift.yaml | 2 +- deploy/examples/operator.yaml | 2 +- pkg/operator/ceph/csi/spec.go | 2 +- tests/framework/utils/snapshot.go | 4 ++-- 10 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md index c33ddf82a07b..be1a71589eb3 100644 --- a/Documentation/ceph-upgrade.md +++ b/Documentation/ceph-upgrade.md @@ -550,7 +550,7 @@ ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4 ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" -ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" +ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0" ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1" ``` @@ -575,7 +575,7 @@ k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 -k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 +k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 quay.io/cephcsi/cephcsi:v3.5.1 quay.io/csiaddons/volumereplication-operator:v0.3.0 quay.io/csiaddons/k8s-sidecar:v0.2.1 diff --git a/Documentation/helm-operator.md b/Documentation/helm-operator.md index 8921efdbf89c..0ba6d395ec43 100644 --- a/Documentation/helm-operator.md +++ b/Documentation/helm-operator.md @@ -140,7 +140,7 @@ The following tables lists the configurable parameters of the rook-operator char | `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0` | | `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.4.0` | | `csi.provisioner.image` | Kubernetes CSI provisioner image. | `k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0` | -| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0` | +| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1` | | `csi.attacher.image` | Kubernetes CSI Attacher image. | `k8s.gcr.io/sig-storage/csi-attacher:v3.4.0` | | `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Pods. | | | `csi.rbdPodLabels` | Labels to add to the CSI RBD Pods. 
| | diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index d2af9cbcb2b4..e8bf3e4e0cce 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -425,7 +425,7 @@ rules: verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["create", "get", "list", "watch", "update", "delete"] @@ -434,10 +434,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -501,7 +501,7 @@ rules: verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["create", "get", "list", "watch", "update", "delete"] @@ -510,10 +510,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: [""] resources: ["persistentvolumeclaims/status"] verbs: ["update", "patch"] diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml index 338fc65c4ba7..19edea3d0b5b 100644 --- a/deploy/charts/rook-ceph/values.yaml +++ b/deploy/charts/rook-ceph/values.yaml @@ -282,7 +282,7 @@ csi: #provisioner: #image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 #snapshotter: - #image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + #image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 #attacher: #image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 #resizer: diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml index 254a03b2eb28..7ea23251862c 100644 --- a/deploy/examples/common.yaml +++ b/deploy/examples/common.yaml @@ -66,7 +66,7 @@ rules: verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["create", "get", "list", "watch", "update", "delete"] @@ -75,10 +75,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -160,7 +160,7 @@ rules: verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] 
resources: ["volumesnapshotcontents"] verbs: ["create", "get", "list", "watch", "update", "delete"] @@ -169,10 +169,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots/status"] - verbs: ["update"] + verbs: ["update", "patch"] - apiGroups: [""] resources: ["persistentvolumeclaims/status"] verbs: ["update", "patch"] diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt index 09ea3dd89407..2e54b09490c2 100644 --- a/deploy/examples/images.txt +++ b/deploy/examples/images.txt @@ -2,7 +2,7 @@ k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 - k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 quay.io/ceph/ceph:v16.2.7 quay.io/cephcsi/cephcsi:v3.5.1 quay.io/csiaddons/k8s-sidecar:v0.2.1 diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index efc3d58ae7fe..4627006e5dff 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -165,7 +165,7 @@ data: # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" # (Optional) set user created priorityclassName for csi plugin pods. diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index b127e6e98396..b0092c9996ff 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -83,7 +83,7 @@ data: # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0" # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0" # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1" # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0" # (Optional) set user created priorityclassName for csi plugin pods. 
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index 4f545e9927d8..86c79da4c348 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -105,7 +105,7 @@ var (
 	DefaultRegistrarImage         = "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0"
 	DefaultProvisionerImage       = "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0"
 	DefaultAttacherImage          = "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0"
-	DefaultSnapshotterImage       = "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0"
+	DefaultSnapshotterImage       = "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1"
 	DefaultResizerImage           = "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0"
 	DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.3.0"
 	DefaultCSIAddonsImage         = "quay.io/csiaddons/k8s-sidecar:v0.2.1"
diff --git a/tests/framework/utils/snapshot.go b/tests/framework/utils/snapshot.go
index b333ec2f6cf8..73d5aafebd1d 100644
--- a/tests/framework/utils/snapshot.go
+++ b/tests/framework/utils/snapshot.go
@@ -27,7 +27,7 @@ import (
 const (
 	// snapshotterVersion from which the snapshotcontroller and CRD will be
 	// installed
-	snapshotterVersion = "v4.0.0"
+	snapshotterVersion = "v5.0.1"
 	repoURL        = "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter"
 	rbacPath       = "deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml"
 	controllerPath = "deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
@@ -83,7 +83,7 @@ func (k8sh *K8sHelper) snapshotController(action string) error {
 // WaitForSnapshotController check snapshotcontroller is ready within given
 // retries count.
 func (k8sh *K8sHelper) WaitForSnapshotController(retries int) error {
-	namespace := "default"
+	namespace := "kube-system"
 	ctx := context.TODO()
 	snapshotterName := "snapshot-controller"
 	for i := 0; i < retries; i++ {

From 954624b5fae28fcb21a2b8f49f14bf0db78ca887 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Thu, 27 Jan 2022 13:23:25 +0530
Subject: [PATCH 11/13] helm: add annotations and labels for VSC

add support to specify annotations and labels
for the volumesnapshotclass.

closes: #9655

Signed-off-by: Madhu Rajanna
(cherry picked from commit 08dadaa486cea3dca4b254f68fbc0d057f89bce0)
---
 .../templates/volumesnapshotclass.yaml      | 14 ++++++++++++++
 deploy/charts/rook-ceph-cluster/values.yaml |  4 ++++
 2 files changed, 18 insertions(+)
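A minimal usage sketch for the new values, for reference; the annotation and
label key/value pairs below are illustrative placeholders, not keys the chart
requires:

    cephBlockPoolsVolumeSnapshotClass:
      name: ceph-block
      isDefault: false
      deletionPolicy: Delete
      # Both maps default to {}. The template renders the labels block and the
      # extra annotations only when the corresponding value is non-empty, so
      # existing deployments render unchanged.
      annotations:
        example.com/created-by: rook   # illustrative annotation
      labels:
        environment: test              # illustrative label
      parameters: {}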
diff --git a/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml b/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml
index 704eb43f9d7b..646824a76424 100644
--- a/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml
+++ b/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml
@@ -11,8 +11,15 @@ apiVersion: snapshot.storage.k8s.io/v1beta1
 kind: VolumeSnapshotClass
 metadata:
   name: {{ $filesystemvsc.name }}
+{{- if $filesystemvsc.labels }}
+  labels:
+{{ toYaml $filesystemvsc.labels | indent 4 }}
+{{- end }}
   annotations:
     snapshot.storage.kubernetes.io/is-default-class: "{{ if default false $filesystemvsc.isDefault }}true{{ else }}false{{ end }}"
+{{- if $filesystemvsc.annotations }}
+{{ toYaml $filesystemvsc.annotations | indent 4 }}
+{{- end }}
 driver: {{ .Values.operatorNamespace }}.cephfs.csi.ceph.com
 parameters:
   clusterID: {{ .Release.Namespace }}
@@ -34,8 +41,15 @@ apiVersion: snapshot.storage.k8s.io/v1beta1
 kind: VolumeSnapshotClass
 metadata:
   name: {{ $blockpoolvsc.name }}
+{{- if $blockpoolvsc.labels }}
+  labels:
+{{ toYaml $blockpoolvsc.labels | indent 4 }}
+{{- end }}
   annotations:
     snapshot.storage.kubernetes.io/is-default-class: "{{ if default false $blockpoolvsc.isDefault }}true{{ else }}false{{ end }}"
+{{- if $blockpoolvsc.annotations }}
+{{ toYaml $blockpoolvsc.annotations | indent 4 }}
+{{- end }}
 driver: {{ .Values.operatorNamespace }}.rbd.csi.ceph.com
 parameters:
   clusterID: {{ .Release.Namespace }}
diff --git a/deploy/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml
index 68c27158bf13..fd3e8f270d9c 100644
--- a/deploy/charts/rook-ceph-cluster/values.yaml
+++ b/deploy/charts/rook-ceph-cluster/values.yaml
@@ -403,6 +403,8 @@ cephFileSystemVolumeSnapshotClass:
   name: ceph-filesystem
   isDefault: true
   deletionPolicy: Delete
+  annotations: {}
+  labels: {}
   # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#cephfs-snapshots for available configuration
   parameters: {}

@@ -411,6 +413,8 @@ cephBlockPoolsVolumeSnapshotClass:
   name: ceph-block
   isDefault: false
   deletionPolicy: Delete
+  annotations: {}
+  labels: {}
   # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#rbd-snapshots for available configuration
   parameters: {}

From 8885db9b86c0836caafc4684285330b1db7a9e24 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Thu, 27 Jan 2022 23:06:16 +0530
Subject: [PATCH 12/13] csi: check deployment for snapshot controller

The external-snapshotter was deployed as a StatefulSet in 4.x and is now
deployed as a Deployment. Updated the check in CI to make sure the
Deployment is created.
Signed-off-by: Madhu Rajanna
(cherry picked from commit 53e12d687fd3284487addd3c21f39f3f23697813)
---
 tests/framework/utils/snapshot.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/framework/utils/snapshot.go b/tests/framework/utils/snapshot.go
index 73d5aafebd1d..c657305e0f9a 100644
--- a/tests/framework/utils/snapshot.go
+++ b/tests/framework/utils/snapshot.go
@@ -87,17 +87,17 @@ func (k8sh *K8sHelper) WaitForSnapshotController(retries int) error {
 	ctx := context.TODO()
 	snapshotterName := "snapshot-controller"
 	for i := 0; i < retries; i++ {
-		ss, err := k8sh.Clientset.AppsV1().StatefulSets(namespace).Get(ctx, snapshotterName, metav1.GetOptions{})
+		ss, err := k8sh.Clientset.AppsV1().Deployments(namespace).Get(ctx, snapshotterName, metav1.GetOptions{})
 		if err != nil && !apierrors.IsNotFound(err) {
 			return err
 		}
 		if ss.Status.ReadyReplicas > 0 && ss.Status.ReadyReplicas == ss.Status.Replicas {
 			return nil
 		}
-		logger.Infof("waiting for %q statufulset in namespace %q (readyreplicas %d < replicas %d)", snapshotterName, namespace, ss.Status.ReadyReplicas, ss.Status.Replicas)
+		logger.Infof("waiting for %q deployment in namespace %q (readyreplicas %d < replicas %d)", snapshotterName, namespace, ss.Status.ReadyReplicas, ss.Status.Replicas)
 		time.Sleep(RetryInterval * time.Second)
 	}
-	return fmt.Errorf("giving up waiting for %q statufulset in namespace %q", snapshotterName, namespace)
+	return fmt.Errorf("giving up waiting for %q deployment in namespace %q", snapshotterName, namespace)
 }

 // CreateSnapshotController creates the snapshotcontroller and required RBAC

From 6ca7da2bb29a211ef90574c5602e4cbebc1988a6 Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Thu, 27 Jan 2022 11:35:06 -0700
Subject: [PATCH 13/13] csi: add patch permission to volumesnapshotcontents

The update to the snapshots 5.0 controller requires RBAC for patching
the volumesnapshotcontents.
Signed-off-by: Travis Nielsen
(cherry picked from commit 3e4edc0f1d91d44d8e5c57072e66ce97febfe80c)
---
 deploy/charts/rook-ceph/templates/clusterrole.yaml | 4 ++--
 deploy/examples/common.yaml                        | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml
index e8bf3e4e0cce..a61cd8a2440c 100644
--- a/deploy/charts/rook-ceph/templates/clusterrole.yaml
+++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml
@@ -428,7 +428,7 @@ rules:
     verbs: ["get", "list", "watch", "update", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
@@ -504,7 +504,7 @@ rules:
     verbs: ["get", "list", "watch", "update", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml
index 7ea23251862c..b560c9adc7fe 100644
--- a/deploy/examples/common.yaml
+++ b/deploy/examples/common.yaml
@@ -69,7 +69,7 @@ rules:
     verbs: ["get", "list", "watch", "update", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
@@ -163,7 +163,7 @@ rules:
     verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
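
For reference, given values like the sketch under PATCH 11/13 above, the
updated rook-ceph-cluster template would render a VolumeSnapshotClass along
these lines; the rook-ceph namespace and driver prefix shown are illustrative
and depend on the release namespace and operatorNamespace:

    apiVersion: snapshot.storage.k8s.io/v1beta1
    kind: VolumeSnapshotClass
    metadata:
      name: ceph-block
      labels:
        environment: test              # from .labels
      annotations:
        snapshot.storage.kubernetes.io/is-default-class: "false"
        example.com/created-by: rook   # user annotations merge after is-default-class
    driver: rook-ceph.rbd.csi.ceph.com # {{ .Values.operatorNamespace }}.rbd.csi.ceph.com
    deletionPolicy: Delete
    parameters:
      clusterID: rook-ceph             # {{ .Release.Namespace }}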