diff --git a/pkg/operator/ceph/cluster/cluster_external.go b/pkg/operator/ceph/cluster/cluster_external.go
index 0c0749b7e8a6..8c7e0b4702a1 100644
--- a/pkg/operator/ceph/cluster/cluster_external.go
+++ b/pkg/operator/ceph/cluster/cluster_external.go
@@ -130,7 +130,9 @@ func (c *ClusterController) configureExternalCephCluster(cluster *cluster) error
 			Monitors: monEndpoints,
 		},
 	}
-	err = csi.SaveClusterConfig(c.context.Clientset, c.namespacedName.Namespace, cluster.ClusterInfo, csiConfigEntry)
+
+	clusterId := c.namespacedName.Namespace // cluster id is same as cluster namespace for CephClusters
+	err = csi.SaveClusterConfig(c.context.Clientset, clusterId, c.namespacedName.Namespace, cluster.ClusterInfo, csiConfigEntry)
 	if err != nil {
 		return errors.Wrap(err, "failed to update csi cluster config")
 	}
diff --git a/pkg/operator/ceph/cluster/mon/mon.go b/pkg/operator/ceph/cluster/mon/mon.go
index fbceb5f6c186..5215f322bc9f 100644
--- a/pkg/operator/ceph/cluster/mon/mon.go
+++ b/pkg/operator/ceph/cluster/mon/mon.go
@@ -1112,7 +1112,9 @@ func (c *Cluster) saveMonConfig() error {
 			Monitors: monEndpoints,
 		},
 	}
-	if err := csi.SaveClusterConfig(c.context.Clientset, c.Namespace, c.ClusterInfo, csiConfigEntry); err != nil {
+
+	clusterId := c.Namespace // cluster id is same as cluster namespace for CephClusters
+	if err := csi.SaveClusterConfig(c.context.Clientset, clusterId, c.Namespace, c.ClusterInfo, csiConfigEntry); err != nil {
 		return errors.Wrap(err, "failed to update csi cluster config")
 	}
diff --git a/pkg/operator/ceph/csi/cluster_config.go b/pkg/operator/ceph/csi/cluster_config.go
index c09fe81f4bcf..76c958a22c8c 100644
--- a/pkg/operator/ceph/csi/cluster_config.go
+++ b/pkg/operator/ceph/csi/cluster_config.go
@@ -148,7 +148,7 @@ func updateNetNamespaceFilePath(clusterNamespace string, cc csiClusterConfig) {
 
 // updateCsiClusterConfig returns a json-formatted string containing
 // the cluster-to-mon mapping required to configure ceph csi.
-func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *CSIClusterConfigEntry) (string, error) {
+func updateCsiClusterConfig(curr, clusterID, clusterNamespace string, newCsiClusterConfigEntry *CSIClusterConfigEntry) (string, error) {
 	var (
 		cc     csiClusterConfig
 		centry CSIClusterConfigEntry
@@ -166,9 +166,17 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
 	// independently and won't collide.
 	if newCsiClusterConfigEntry != nil {
 		for i, centry := range cc {
+			// there is a bug with an unknown source where the csi config can have an empty
+			// namespace entry. detect and fix this scenario if it occurs
+			if centry.Namespace == "" && centry.ClusterID == clusterID {
+				logger.Infof("correcting CSI config map entry for cluster ID %q; empty namespace will be set to %q", clusterID, clusterNamespace)
+				centry.Namespace = clusterNamespace
+				cc[i] = centry
+			}
+
+			// If the clusterID belongs to the same cluster, update the entry.
 			// update default clusterID's entry
-			if clusterKey == centry.Namespace {
+			if clusterID == centry.Namespace {
 				centry.Monitors = newCsiClusterConfigEntry.Monitors
 				centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity
 				centry.CephFS.KernelMountOptions = newCsiClusterConfigEntry.CephFS.KernelMountOptions
@@ -178,7 +186,7 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
 		}
 	}
 	for i, centry := range cc {
-		if centry.ClusterID == clusterKey {
+		if centry.ClusterID == clusterID {
 			// If the new entry is nil, this means the entry is being deleted so remove it from the list
 			if newCsiClusterConfigEntry == nil {
 				cc = append(cc[:i], cc[i+1:]...)
@@ -214,8 +222,8 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
 	// If it's the first time we create the cluster, the entry does not exist, so the removal
 	// will fail with a dangling pointer
 	if newCsiClusterConfigEntry != nil {
-		centry.ClusterID = clusterKey
-		centry.Namespace = newCsiClusterConfigEntry.Namespace
+		centry.ClusterID = clusterID
+		centry.Namespace = clusterNamespace
 		centry.Monitors = newCsiClusterConfigEntry.Monitors
 		centry.RBD = newCsiClusterConfigEntry.RBD
 		centry.CephFS = newCsiClusterConfigEntry.CephFS
@@ -227,7 +235,7 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
 		}
 	}
 
-	updateNetNamespaceFilePath(clusterKey, cc)
+	updateNetNamespaceFilePath(clusterID, cc)
 	return formatCsiClusterConfig(cc)
 }
 
@@ -300,13 +308,12 @@ func updateCsiConfigMapOwnerRefs(ctx context.Context, namespace string, clientse
 }
 
 // SaveClusterConfig updates the config map used to provide ceph-csi with
-// basic cluster configuration. The clusterNamespace and clusterInfo are
-// used to determine what "cluster" in the config map will be updated and
-// the clusterNamespace value is expected to match the clusterID
-// value that is provided to ceph-csi uses in the storage class.
-// The locker l is typically a mutex and is used to prevent the config
-// map from being updated for multiple clusters simultaneously.
+// basic cluster configuration. The clusterID, clusterNamespace, and clusterInfo are
+// used to determine what "cluster" in the config map will be updated. clusterID should be the same
+// as clusterNamespace for CephClusters, but for other resources (e.g., CephBlockPoolRadosNamespace,
+// CephFilesystemSubVolumeGroup) or for other supplementary entries, the clusterID should be unique
+// and different from the namespace so as not to disrupt CephCluster configurations.
+func SaveClusterConfig(clientset kubernetes.Interface, clusterID, clusterNamespace string, clusterInfo *cephclient.ClusterInfo, newCsiClusterConfigEntry *CSIClusterConfigEntry) error {
 	// csi is deployed into the same namespace as the operator
 	csiNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar)
 	if csiNamespace == "" {
@@ -342,7 +349,7 @@ func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string,
 		currData = "[]"
 	}
 
-	newData, err := updateCsiClusterConfig(currData, clusterNamespace, newCsiClusterConfigEntry)
+	newData, err := updateCsiClusterConfig(currData, clusterID, clusterNamespace, newCsiClusterConfigEntry)
 	if err != nil {
 		return errors.Wrap(err, "failed to update csi config map data")
 	}
diff --git a/pkg/operator/ceph/csi/cluster_config_test.go b/pkg/operator/ceph/csi/cluster_config_test.go
index dca5e4425c61..8dbea7f4ef9f 100644
--- a/pkg/operator/ceph/csi/cluster_config_test.go
+++ b/pkg/operator/ceph/csi/cluster_config_test.go
@@ -19,6 +19,7 @@ package csi
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"reflect"
 	"strings"
 	"testing"
@@ -119,7 +120,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	var err error
 
 	t.Run("add a simple mons list", func(t *testing.T) {
-		s, err = updateCsiClusterConfig("[]", "rook-ceph-1", &csiClusterConfigEntry)
+		s, err = updateCsiClusterConfig("[]", "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntry)
 		assert.NoError(t, err)
 		want = `[{"clusterID":"rook-ceph-1","monitors":["1.2.3.4:5000"],"namespace":"rook-ceph-1"}]`
 		compareJSON(t, want, s)
@@ -127,7 +128,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 
 	t.Run("add a 2nd mon to the current cluster", func(t *testing.T) {
 		csiClusterConfigEntry.Monitors = append(csiClusterConfigEntry.Monitors, "10.11.12.13:5000")
-		s, err = updateCsiClusterConfig(s, "rook-ceph-1", &csiClusterConfigEntry)
+		s, err = updateCsiClusterConfig(s, "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntry)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -139,7 +140,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	})
 
 	t.Run("add mount options to the current cluster", func(t *testing.T) {
-		configWithMountOptions, err := updateCsiClusterConfig(s, "rook-ceph-1", &csiClusterConfigEntryMountOptions)
+		configWithMountOptions, err := updateCsiClusterConfig(s, "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntryMountOptions)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(configWithMountOptions)
 		assert.NoError(t, err)
@@ -150,7 +151,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	})
 
 	t.Run("add a 2nd cluster with 3 mons", func(t *testing.T) {
-		s, err = updateCsiClusterConfig(s, "beta", &csiClusterConfigEntry2)
+		s, err = updateCsiClusterConfig(s, "beta", "beta", &csiClusterConfigEntry2)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -174,7 +175,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 		i := 2
 		// Remove last element of the slice
 		csiClusterConfigEntry2.Monitors = append(csiClusterConfigEntry2.Monitors[:i], csiClusterConfigEntry2.Monitors[i+1:]...)
-		s, err = updateCsiClusterConfig(s, "beta", &csiClusterConfigEntry2)
+		s, err = updateCsiClusterConfig(s, "beta", "beta", &csiClusterConfigEntry2)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -190,7 +191,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	})
 
 	t.Run("add a 3rd cluster with subvolumegroup", func(t *testing.T) {
-		s, err = updateCsiClusterConfig(s, "baba", &csiClusterConfigEntry3)
+		s, err = updateCsiClusterConfig(s, "baba", "baba", &csiClusterConfigEntry3)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -205,7 +206,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	})
 
 	t.Run("update mount options in presence of subvolumegroup", func(t *testing.T) {
-		sMntOptionUpdate, err := updateCsiClusterConfig(s, "baba", &csiClusterConfigEntryMountOptions)
+		sMntOptionUpdate, err := updateCsiClusterConfig(s, "baba", "baba", &csiClusterConfigEntryMountOptions)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(sMntOptionUpdate)
 		assert.NoError(t, err)
@@ -219,7 +220,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 
 	t.Run("add a 4th mon to the 3rd cluster and subvolumegroup is preserved", func(t *testing.T) {
 		csiClusterConfigEntry3.Monitors = append(csiClusterConfigEntry3.Monitors, "10.11.12.13:5000")
-		s, err = updateCsiClusterConfig(s, "baba", &csiClusterConfigEntry3)
+		s, err = updateCsiClusterConfig(s, "baba", "baba", &csiClusterConfigEntry3)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -230,7 +231,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 
 	t.Run("remove subvolumegroup", func(t *testing.T) {
 		csiClusterConfigEntry3.CephFS.SubvolumeGroup = ""
-		s, err = updateCsiClusterConfig(s, "baba", nil)
+		s, err = updateCsiClusterConfig(s, "baba", "baba", nil)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -245,7 +246,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 				},
 			},
 		}
-		s, err = updateCsiClusterConfig(s, "quatre", &csiClusterConfigEntry4)
+		s, err = updateCsiClusterConfig(s, "quatre", "quatre", &csiClusterConfigEntry4)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -254,7 +255,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 		assert.Equal(t, "my-group2", cc[2].CephFS.SubvolumeGroup, cc)
 
 		csiClusterConfigEntry4.Monitors = []string{"10.1.1.1:5000", "10.1.1.2:5000", "10.1.1.3:5000"}
-		s, err = updateCsiClusterConfig(s, "quatre", &csiClusterConfigEntry4)
+		s, err = updateCsiClusterConfig(s, "quatre", "quatre", &csiClusterConfigEntry4)
 		assert.NoError(t, err)
 		cc, err = parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -263,7 +264,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	})
 
 	t.Run("does it return error on garbage input?", func(t *testing.T) {
-		_, err = updateCsiClusterConfig("qqq", "beta", &csiClusterConfigEntry2)
+		_, err = updateCsiClusterConfig("qqq", "beta", "beta", &csiClusterConfigEntry2)
 		assert.Error(t, err)
 	})
 
@@ -278,7 +279,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 				Monitors: []string{"1.2.3.4:5000"},
 			},
 		}
-		s, err := updateCsiClusterConfig("[]", clusterIDofCluster1, &csiCluster1ConfigEntry)
+		s, err := updateCsiClusterConfig("[]", clusterIDofCluster1, clusterIDofCluster1, &csiCluster1ConfigEntry)
 		assert.NoError(t, err)
 		want = `[{"clusterID":"rook-ceph","monitors":["1.2.3.4:5000"],"namespace":"rook-ceph"}]`
 		compareJSON(t, want, s)
@@ -292,7 +293,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 				},
 			},
 		}
-		s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster1, &subVolCsiCluster1Config)
+		s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster1, clusterIDofCluster1, &subVolCsiCluster1Config)
 		assert.NoError(t, err)
 		cc, err := parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -310,7 +311,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 				},
 			},
 		}
-		s, err = updateCsiClusterConfig(s, radosNSofCluster1, &radosNsCsiCluster1Config)
+		s, err = updateCsiClusterConfig(s, radosNSofCluster1, clusterIDofCluster1, &radosNsCsiCluster1Config)
 		assert.NoError(t, err)
 		cc, err = parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -320,7 +321,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 
 		// update mon IP's and check is it updating for all clusterID's
 		csiCluster1ConfigEntry.Monitors = append(csiCluster1ConfigEntry.Monitors, "1.2.3.10:5000")
-		s, err = updateCsiClusterConfig(s, clusterIDofCluster1, &csiCluster1ConfigEntry)
+		s, err = updateCsiClusterConfig(s, clusterIDofCluster1, clusterIDofCluster1, &csiCluster1ConfigEntry)
 		assert.NoError(t, err)
 		cc, err = parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -359,11 +360,11 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 				},
 			},
 		}
-		s, err = updateCsiClusterConfig(s, clusterIDofCluster2, &csiCluster2ConfigEntry)
+		s, err = updateCsiClusterConfig(s, clusterIDofCluster2, clusterIDofCluster2, &csiCluster2ConfigEntry)
 		assert.NoError(t, err)
-		s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster2, &subVolCsiCluster2Config)
+		s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster2, clusterIDofCluster2, &subVolCsiCluster2Config)
 		assert.NoError(t, err)
-		s, err = updateCsiClusterConfig(s, radosNSofCluster2, &radosNsCsiCluster2Config)
+		s, err = updateCsiClusterConfig(s, radosNSofCluster2, clusterIDofCluster2, &radosNsCsiCluster2Config)
 		assert.NoError(t, err)
 		cc, err = parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -382,7 +383,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 		// update mon on 2nd cluster and check is it updating for all clusterID's
 		// of 2nd cluster
 		csiCluster2ConfigEntry.Monitors = append(csiCluster2ConfigEntry.Monitors, "192.168.0.3:5000")
-		s, err = updateCsiClusterConfig(s, clusterIDofCluster2, &csiCluster2ConfigEntry)
+		s, err = updateCsiClusterConfig(s, clusterIDofCluster2, clusterIDofCluster2, &csiCluster2ConfigEntry)
 		assert.NoError(t, err)
 		cc, err = parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -404,7 +405,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 		// clusterID's of 2nd cluster
 		i := 1
 		csiCluster2ConfigEntry.Monitors = append(csiCluster2ConfigEntry.Monitors[:i], csiCluster2ConfigEntry.Monitors[i+1:]...)
-		s, err = updateCsiClusterConfig(s, clusterIDofCluster2, &csiCluster2ConfigEntry)
+		s, err = updateCsiClusterConfig(s, clusterIDofCluster2, clusterIDofCluster2, &csiCluster2ConfigEntry)
 		assert.NoError(t, err)
 		cc, err = parseCsiClusterConfig(s)
 		assert.NoError(t, err)
@@ -423,18 +424,61 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
 	})
 
 	t.Run("test multus cluster", func(t *testing.T) {
-		s, err = updateCsiClusterConfig("[]", "rook-ceph-1", &csiClusterConfigEntryMultus)
+		s, err = updateCsiClusterConfig("[]", "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntryMultus)
 		assert.NoError(t, err)
 		compareJSON(t, `[{"clusterID":"rook-ceph-1","monitors":["1.2.3.4:5000"],"rbd":{"netNamespaceFilePath":"/var/run/netns/rook-ceph-1","radosNamespace":"rook-ceph-1"},"namespace":"rook-ceph-1"}]`, s)
 	})
 
 	t.Run("test crush location labels are set", func(t *testing.T) {
-		s, err = updateCsiClusterConfig("[]", "rook-ceph-4", &csiClusterConfigEntry4)
+		s, err = updateCsiClusterConfig("[]", "rook-ceph-4", "rook-ceph-4", &csiClusterConfigEntry4)
 		assert.NoError(t, err)
 		compareJSON(t, `[{"clusterID":"rook-ceph-4","monitors":["10.1.1.1:5000"],"readAffinity": {"enabled": true, "crushLocationLabels":["kubernetes.io/hostname",
 			"topology.kubernetes.io/region","topology.kubernetes.io/zone","topology.rook.io/chassis","topology.rook.io/rack","topology.rook.io/row","topology.rook.io/pdu",
 			"topology.rook.io/pod","topology.rook.io/room","topology.rook.io/datacenter"]},"namespace":"rook-ceph-4"}]`, s)
 	})
+
+	t.Run("test empty namespace correction", func(t *testing.T) {
+		currentConfigFormatString := `[` +
+			`{"clusterID":"cluster-id-and-namespace","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"","subvolumeGroup":"","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"%s"},` +
			`{"clusterID":"5bb69c306a7d011c3e91c3cec112fb7a","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"","subvolumeGroup":"csi","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"cluster-id-and-namespace"}` +
+			`]`
+		idAndNs := "cluster-id-and-namespace"
+
+		csiConfigEntry := &CSIClusterConfigEntry{
+			Namespace: idAndNs,
+			ClusterInfo: cephcsi.ClusterInfo{
+				Monitors: []string{"172.30.100.1:3300"},
+			},
+		}
+		currentConfigWithoutNamespace := fmt.Sprintf(currentConfigFormatString, "")
+		out, err := updateCsiClusterConfig(currentConfigWithoutNamespace, idAndNs, idAndNs, csiConfigEntry)
+		assert.NoError(t, err)
+		expectedOutput := fmt.Sprintf(currentConfigFormatString, idAndNs)
+		assert.Equal(t, expectedOutput, out)
+	})
+
+	t.Run("test empty namespace correction and clear netNamespaceFilePath", func(t *testing.T) {
+		holderEnabled = false
+		currentConfigFormatString := `[` +
+			`{"clusterID":"cluster-id-and-namespace","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"%s","subvolumeGroup":"","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"%s","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"%s"},` +
+			`{"clusterID":"5bb69c306a7d011c3e91c3cec112fb7a","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"","subvolumeGroup":"csi","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"cluster-id-and-namespace"}` +
+			`]`
+		cephFsNetNsFilePath := "/var/lib/kubelet/plugins/cluster-id-and-namespace.cephfs.csi.ceph.com/cluster-id-and-namespace.net.ns"
+		rbdNetNsFilePath := "/var/lib/kubelet/plugins/cluster-id-and-namespace.rbd.csi.ceph.com/cluster-id-and-namespace.net.ns"
+		idAndNs := "cluster-id-and-namespace"
+
+		csiConfigEntry := &CSIClusterConfigEntry{
+			Namespace: idAndNs,
+			ClusterInfo: cephcsi.ClusterInfo{
+				Monitors: []string{"172.30.100.1:3300"},
+			},
+		}
+		currentConfigWithoutNamespace := fmt.Sprintf(currentConfigFormatString, cephFsNetNsFilePath, rbdNetNsFilePath, "")
+		out, err := updateCsiClusterConfig(currentConfigWithoutNamespace, idAndNs, idAndNs, csiConfigEntry)
+		assert.NoError(t, err)
+		expectedOutput := fmt.Sprintf(currentConfigFormatString, "", "", idAndNs)
+		assert.Equal(t, expectedOutput, out)
+	})
 }
 
 func contains(src, dest []string) bool {
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index 0d5e37238f82..70bf0c6f4374 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -923,7 +923,8 @@ func (r *ReconcileCSI) configureHolder(driver driverDetails, c ClusterDetail, tp
 	}
 
 	// Save the path of the network namespace file for ceph-csi to use
-	err = SaveClusterConfig(r.context.Clientset, c.cluster.Namespace, c.clusterInfo, clusterConfigEntry)
+	clusterId := c.cluster.Namespace // cluster ID is same as cluster namespace for CephClusters
+	err = SaveClusterConfig(r.context.Clientset, clusterId, c.cluster.Namespace, c.clusterInfo, clusterConfigEntry)
 	if err != nil {
 		return errors.Wrapf(err, "failed to save cluster config for csi holder %q", driver.fullName)
 	}
diff --git a/pkg/operator/ceph/file/subvolumegroup/controller.go b/pkg/operator/ceph/file/subvolumegroup/controller.go
index ea7b631a997b..3c01bd0cb42f 100644
--- a/pkg/operator/ceph/file/subvolumegroup/controller.go
+++ b/pkg/operator/ceph/file/subvolumegroup/controller.go
@@ -198,7 +198,7 @@ func (r *ReconcileCephFilesystemSubVolumeGroup) reconcile(request reconcile.Requ
 		}
 	}
 
-	err = csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephFilesystemSubVolumeGroup), r.clusterInfo, nil)
+	err = csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephFilesystemSubVolumeGroup), cephCluster.Namespace, r.clusterInfo, nil)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to save cluster config")
 	}
@@ -307,7 +307,7 @@ func (r *ReconcileCephFilesystemSubVolumeGroup) updateClusterConfig(cephFilesyst
 		csiClusterConfigEntry.CephFS.NetNamespaceFilePath = netNamespaceFilePath
 	}
 
-	err := csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephFilesystemSubVolumeGroup), r.clusterInfo, &csiClusterConfigEntry)
+	err := csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephFilesystemSubVolumeGroup), cephCluster.Namespace, r.clusterInfo, &csiClusterConfigEntry)
 	if err != nil {
 		return errors.Wrap(err, "failed to save cluster config")
 	}
diff --git a/pkg/operator/ceph/pool/radosnamespace/controller.go b/pkg/operator/ceph/pool/radosnamespace/controller.go
index 5e82029c3400..60f9d2b716e1 100644
--- a/pkg/operator/ceph/pool/radosnamespace/controller.go
+++ b/pkg/operator/ceph/pool/radosnamespace/controller.go
@@ -198,7 +198,7 @@ func (r *ReconcileCephBlockPoolRadosNamespace) reconcile(request reconcile.Reque
 			return reconcile.Result{}, errors.Wrapf(err, "failed to delete ceph blockpool rados namespace %q", cephBlockPoolRadosNamespace.Name)
 		}
 	}
-	err = csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephBlockPoolRadosNamespace), r.clusterInfo, nil)
+	err = csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephBlockPoolRadosNamespace), cephCluster.Namespace, r.clusterInfo, nil)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to save cluster config")
 	}
@@ -300,7 +300,7 @@ func (r *ReconcileCephBlockPoolRadosNamespace) updateClusterConfig(cephBlockPool
 	}
 
 	// Save cluster config in the csi config map
-	err := csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephBlockPoolRadosNamespace), r.clusterInfo, &csiClusterConfigEntry)
+	err := csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephBlockPoolRadosNamespace), cephCluster.Namespace, r.clusterInfo, &csiClusterConfigEntry)
 	if err != nil {
 		return errors.Wrap(err, "failed to save cluster config")
 	}
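
For reference, the following is a minimal usage sketch (not part of the patch) of the calling convention the new SaveClusterConfig signature establishes: a CephCluster passes its namespace for both clusterID and clusterNamespace, while a supplementary resource such as a CephBlockPoolRadosNamespace or CephFilesystemSubVolumeGroup passes a unique clusterID together with the owning cluster's namespace. Only the SaveClusterConfig signature and the CSIClusterConfigEntry fields come from the diff above; the function name, monitor address, example hashed ID, and import paths below are assumptions for illustration.

// Hypothetical sketch, written as if it lived in the rook csi package.
package csi

import (
	cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes" // alias as in the test file; path assumed
	"k8s.io/client-go/kubernetes"

	cephclient "github.com/rook/rook/pkg/daemon/ceph/client" // path assumed
)

func saveExampleCsiEntries(clientset kubernetes.Interface, clusterInfo *cephclient.ClusterInfo) error {
	namespace := "rook-ceph" // namespace of the owning CephCluster

	entry := &CSIClusterConfigEntry{
		Namespace: namespace,
		ClusterInfo: cephcsi.ClusterInfo{
			Monitors: []string{"10.0.0.1:3300"}, // made-up monitor endpoint
		},
	}

	// CephCluster entry: clusterID and clusterNamespace are the same value.
	if err := SaveClusterConfig(clientset, namespace, namespace, clusterInfo, entry); err != nil {
		return err
	}

	// Supplementary entry (e.g. for a CephBlockPoolRadosNamespace or
	// CephFilesystemSubVolumeGroup): clusterID is a unique value such as the one
	// produced by the controller's buildClusterID(), distinct from the namespace,
	// while clusterNamespace still names the owning CephCluster's namespace.
	radosNsClusterID := "5bb69c306a7d011c3e91c3cec112fb7a" // hypothetical hashed ID
	return SaveClusterConfig(clientset, radosNsClusterID, namespace, clusterInfo, entry)
}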