csi: fix missing namespace bug in csi cluster config map
Someone testing the Multus holder pod removal feature encountered an
issue where, after the migration process, PVCs could no longer be
created successfully.

The root cause was found to be a ceph csi config map wherein the primary
CephCluster entry was lacking a value for the "namespace" field.
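
For illustration, a broken entry in the csi config map looked roughly
like this (condensed from the new unit test data; only the relevant
fields are shown):

    [{"clusterID":"rook-ceph","monitors":["172.30.100.1:3300"],"namespace":""}]

With "namespace" empty, PVC creation for that cluster fails.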

I observed this once in my development on the holder pod removal
feature, but I was unable to reproduce it and assumed it was my own
error. Since it has now been seen in a user environment, the error must
be a race condition, and I am unable to determine the exact source of
the bug.

I do not believe this bug would be present if the code that updates the
CSI configmap were properly idempotent, but it has many conditions based
on prior states, and I was unable to determine how to resolve this
underlying implementation pattern issue.

Instead, I opted to separate the `clusterKey` parameter into two clear
parts (a signature sketch follows the list):
1. `clusterID` for when `clusterKey` is used as an analogue for
   `clusterID`
2. `clusterNamespace` for when `clusterKey` is used as an analogue for
   `clusterNamespace`
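
For CephClusters the two values are identical, while other entries
(e.g., CephBlockPoolRadosNamespace, CephFilesystemSubVolumeGroup) pair
a unique `clusterID` with the owning cluster's namespace. A minimal
sketch of the resulting signature change, mirroring the diff below:

    // before
    func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string,
        clusterInfo *cephclient.ClusterInfo, newCsiClusterConfigEntry *CSIClusterConfigEntry) error

    // after
    func SaveClusterConfig(clientset kubernetes.Interface, clusterID, clusterNamespace string,
        clusterInfo *cephclient.ClusterInfo, newCsiClusterConfigEntry *CSIClusterConfigEntry) error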

I added unit tests to ensure that SaveClusterConfig() detects when the
namespace is missing and, using the new `clusterNamespace` parameter,
always knows what value to use as input when correcting the bug in
already-installed systems; the correction guard is sketched below.
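
The correction reduces to a small guard at the top of the entry-update
loop in updateCsiClusterConfig(), condensed from the diff below:

    for i, centry := range cc {
        // repair an entry whose namespace was lost by the unidentified race
        if centry.Namespace == "" && centry.ClusterID == clusterID {
            centry.Namespace = clusterNamespace
            cc[i] = centry
        }
        // ... existing update logic continues ...
    }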

I also verified that this update works when the function simultaneously
removes netNamespaceFilePath entries, and that those entries are removed
properly.
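
Condensed from the new unit test: with holder pods disabled
(holderEnabled = false), a single SaveClusterConfig() call both fills
the missing namespace and clears the stale netNamespaceFilePath (entry
fields and paths are abbreviated for illustration):

    // before
    {"clusterID":"rook-ceph","rbd":{"netNamespaceFilePath":"/var/lib/kubelet/plugins/..."},"namespace":""}
    // after
    {"clusterID":"rook-ceph","rbd":{"netNamespaceFilePath":""},"namespace":"rook-ceph"}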

Finally, manual testing also verifies the change.

Signed-off-by: Blaine Gardner <blaine.gardner@ibm.com>
BlaineEXE committed May 2, 2024
1 parent 8416bef commit 4ff8812
Showing 7 changed files with 100 additions and 44 deletions.
4 changes: 3 additions & 1 deletion pkg/operator/ceph/cluster/cluster_external.go
@@ -130,7 +130,9 @@ func (c *ClusterController) configureExternalCephCluster(cluster *cluster) error
Monitors: monEndpoints,
},
}
-err = csi.SaveClusterConfig(c.context.Clientset, c.namespacedName.Namespace, cluster.ClusterInfo, csiConfigEntry)
+
+clusterId := c.namespacedName.Namespace // cluster id is same as cluster namespace for CephClusters
+err = csi.SaveClusterConfig(c.context.Clientset, clusterId, c.namespacedName.Namespace, cluster.ClusterInfo, csiConfigEntry)
if err != nil {
return errors.Wrap(err, "failed to update csi cluster config")
}
4 changes: 3 additions & 1 deletion pkg/operator/ceph/cluster/mon/mon.go
@@ -1112,7 +1112,9 @@ func (c *Cluster) saveMonConfig() error {
Monitors: monEndpoints,
},
}
-if err := csi.SaveClusterConfig(c.context.Clientset, c.Namespace, c.ClusterInfo, csiConfigEntry); err != nil {
+
+clusterId := c.Namespace // cluster id is same as cluster namespace for CephClusters
+if err := csi.SaveClusterConfig(c.context.Clientset, clusterId, c.Namespace, c.ClusterInfo, csiConfigEntry); err != nil {
return errors.Wrap(err, "failed to update csi cluster config")
}

35 changes: 21 additions & 14 deletions pkg/operator/ceph/csi/cluster_config.go
@@ -148,7 +148,7 @@ func updateNetNamespaceFilePath(clusterNamespace string, cc csiClusterConfig) {

// updateCsiClusterConfig returns a json-formatted string containing
// the cluster-to-mon mapping required to configure ceph csi.
-func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *CSIClusterConfigEntry) (string, error) {
+func updateCsiClusterConfig(curr, clusterID, clusterNamespace string, newCsiClusterConfigEntry *CSIClusterConfigEntry) (string, error) {
var (
cc csiClusterConfig
centry CSIClusterConfigEntry
@@ -166,9 +166,17 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
// independently and won't collide.
if newCsiClusterConfigEntry != nil {
for i, centry := range cc {
+// there is a bug with an unknown source where the csi config can have an empty
+// namespace entry. detect and fix this scenario if it occurs
+if centry.Namespace == "" && centry.ClusterID == clusterID {
+logger.Infof("correcting CSI config map entry for cluster ID %q; empty namespace will be set to %q", clusterID, clusterNamespace)
+centry.Namespace = clusterNamespace
+cc[i] = centry
+}
+
// If the clusterID belongs to the same cluster, update the entry.
// update default clusterID's entry
-if clusterKey == centry.Namespace {
+if clusterID == centry.Namespace {
centry.Monitors = newCsiClusterConfigEntry.Monitors
centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity
centry.CephFS.KernelMountOptions = newCsiClusterConfigEntry.CephFS.KernelMountOptions
@@ -178,7 +186,7 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
}
}
for i, centry := range cc {
-if centry.ClusterID == clusterKey {
+if centry.ClusterID == clusterID {
// If the new entry is nil, this means the entry is being deleted so remove it from the list
if newCsiClusterConfigEntry == nil {
cc = append(cc[:i], cc[i+1:]...)
@@ -214,8 +222,8 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
// If it's the first time we create the cluster, the entry does not exist, so the removal
// will fail with a dangling pointer
if newCsiClusterConfigEntry != nil {
-centry.ClusterID = clusterKey
-centry.Namespace = newCsiClusterConfigEntry.Namespace
+centry.ClusterID = clusterID
+centry.Namespace = clusterNamespace
centry.Monitors = newCsiClusterConfigEntry.Monitors
centry.RBD = newCsiClusterConfigEntry.RBD
centry.CephFS = newCsiClusterConfigEntry.CephFS
@@ -227,7 +235,7 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C
}
}

-updateNetNamespaceFilePath(clusterKey, cc)
+updateNetNamespaceFilePath(clusterID, cc)
return formatCsiClusterConfig(cc)
}

@@ -300,13 +308,12 @@ func updateCsiConfigMapOwnerRefs(ctx context.Context, namespace string, clientse
}

// SaveClusterConfig updates the config map used to provide ceph-csi with
-// basic cluster configuration. The clusterNamespace and clusterInfo are
-// used to determine what "cluster" in the config map will be updated and
-// the clusterNamespace value is expected to match the clusterID
-// value that is provided to ceph-csi uses in the storage class.
-// The locker l is typically a mutex and is used to prevent the config
-// map from being updated for multiple clusters simultaneously.
-func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string, clusterInfo *cephclient.ClusterInfo, newCsiClusterConfigEntry *CSIClusterConfigEntry) error {
+// basic cluster configuration. The clusterID, clusterNamespace, and clusterInfo are
+// used to determine what "cluster" in the config map will be updated. clusterID should be the same
+// as clusterNamespace for CephClusters, but for other resources (e.g., CephBlockPoolRadosNamespace,
+// CephFilesystemSubVolumeGroup) or for other supplementary entries, the clusterID should be unique
+// and different from the namespace so as not to disrupt CephCluster configurations.
+func SaveClusterConfig(clientset kubernetes.Interface, clusterID, clusterNamespace string, clusterInfo *cephclient.ClusterInfo, newCsiClusterConfigEntry *CSIClusterConfigEntry) error {
// csi is deployed into the same namespace as the operator
csiNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar)
if csiNamespace == "" {
@@ -342,7 +349,7 @@ func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string,
currData = "[]"
}

-newData, err := updateCsiClusterConfig(currData, clusterNamespace, newCsiClusterConfigEntry)
+newData, err := updateCsiClusterConfig(currData, clusterID, clusterNamespace, newCsiClusterConfigEntry)
if err != nil {
return errors.Wrap(err, "failed to update csi config map data")
}
90 changes: 67 additions & 23 deletions pkg/operator/ceph/csi/cluster_config_test.go
@@ -19,6 +19,7 @@ package csi
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"testing"
@@ -119,15 +120,15 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
var err error

t.Run("add a simple mons list", func(t *testing.T) {
s, err = updateCsiClusterConfig("[]", "rook-ceph-1", &csiClusterConfigEntry)
s, err = updateCsiClusterConfig("[]", "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntry)
assert.NoError(t, err)
want = `[{"clusterID":"rook-ceph-1","monitors":["1.2.3.4:5000"],"namespace":"rook-ceph-1"}]`
compareJSON(t, want, s)
})

t.Run("add a 2nd mon to the current cluster", func(t *testing.T) {
csiClusterConfigEntry.Monitors = append(csiClusterConfigEntry.Monitors, "10.11.12.13:5000")
s, err = updateCsiClusterConfig(s, "rook-ceph-1", &csiClusterConfigEntry)
s, err = updateCsiClusterConfig(s, "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntry)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -139,7 +140,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
})

t.Run("add mount options to the current cluster", func(t *testing.T) {
-configWithMountOptions, err := updateCsiClusterConfig(s, "rook-ceph-1", &csiClusterConfigEntryMountOptions)
+configWithMountOptions, err := updateCsiClusterConfig(s, "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntryMountOptions)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(configWithMountOptions)
assert.NoError(t, err)
@@ -150,7 +151,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
})

t.Run("add a 2nd cluster with 3 mons", func(t *testing.T) {
s, err = updateCsiClusterConfig(s, "beta", &csiClusterConfigEntry2)
s, err = updateCsiClusterConfig(s, "beta", "beta", &csiClusterConfigEntry2)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -174,7 +175,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
i := 2
// Remove last element of the slice
csiClusterConfigEntry2.Monitors = append(csiClusterConfigEntry2.Monitors[:i], csiClusterConfigEntry2.Monitors[i+1:]...)
s, err = updateCsiClusterConfig(s, "beta", &csiClusterConfigEntry2)
s, err = updateCsiClusterConfig(s, "beta", "beta", &csiClusterConfigEntry2)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -190,7 +191,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
})

t.Run("add a 3rd cluster with subvolumegroup", func(t *testing.T) {
s, err = updateCsiClusterConfig(s, "baba", &csiClusterConfigEntry3)
s, err = updateCsiClusterConfig(s, "baba", "baba", &csiClusterConfigEntry3)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -205,7 +206,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
})

t.Run("update mount options in presence of subvolumegroup", func(t *testing.T) {
-sMntOptionUpdate, err := updateCsiClusterConfig(s, "baba", &csiClusterConfigEntryMountOptions)
+sMntOptionUpdate, err := updateCsiClusterConfig(s, "baba", "baba", &csiClusterConfigEntryMountOptions)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(sMntOptionUpdate)
assert.NoError(t, err)
@@ -219,7 +220,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {

t.Run("add a 4th mon to the 3rd cluster and subvolumegroup is preserved", func(t *testing.T) {
csiClusterConfigEntry3.Monitors = append(csiClusterConfigEntry3.Monitors, "10.11.12.13:5000")
s, err = updateCsiClusterConfig(s, "baba", &csiClusterConfigEntry3)
s, err = updateCsiClusterConfig(s, "baba", "baba", &csiClusterConfigEntry3)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -230,7 +231,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {

t.Run("remove subvolumegroup", func(t *testing.T) {
csiClusterConfigEntry3.CephFS.SubvolumeGroup = ""
s, err = updateCsiClusterConfig(s, "baba", nil)
s, err = updateCsiClusterConfig(s, "baba", "baba", nil)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -245,7 +246,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
},
},
}
s, err = updateCsiClusterConfig(s, "quatre", &csiClusterConfigEntry4)
s, err = updateCsiClusterConfig(s, "quatre", "quatre", &csiClusterConfigEntry4)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -254,7 +255,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
assert.Equal(t, "my-group2", cc[2].CephFS.SubvolumeGroup, cc)

csiClusterConfigEntry4.Monitors = []string{"10.1.1.1:5000", "10.1.1.2:5000", "10.1.1.3:5000"}
s, err = updateCsiClusterConfig(s, "quatre", &csiClusterConfigEntry4)
s, err = updateCsiClusterConfig(s, "quatre", "quatre", &csiClusterConfigEntry4)
assert.NoError(t, err)
cc, err = parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -263,7 +264,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
})

t.Run("does it return error on garbage input?", func(t *testing.T) {
_, err = updateCsiClusterConfig("qqq", "beta", &csiClusterConfigEntry2)
_, err = updateCsiClusterConfig("qqq", "beta", "beta", &csiClusterConfigEntry2)
assert.Error(t, err)
})

@@ -278,7 +279,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
Monitors: []string{"1.2.3.4:5000"},
},
}
s, err := updateCsiClusterConfig("[]", clusterIDofCluster1, &csiCluster1ConfigEntry)
s, err := updateCsiClusterConfig("[]", clusterIDofCluster1, clusterIDofCluster1, &csiCluster1ConfigEntry)
assert.NoError(t, err)
want = `[{"clusterID":"rook-ceph","monitors":["1.2.3.4:5000"],"namespace":"rook-ceph"}]`
compareJSON(t, want, s)
@@ -292,7 +293,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
},
},
}
-s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster1, &subVolCsiCluster1Config)
+s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster1, clusterIDofCluster1, &subVolCsiCluster1Config)
assert.NoError(t, err)
cc, err := parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -310,7 +311,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
},
},
}
-s, err = updateCsiClusterConfig(s, radosNSofCluster1, &radosNsCsiCluster1Config)
+s, err = updateCsiClusterConfig(s, radosNSofCluster1, clusterIDofCluster1, &radosNsCsiCluster1Config)
assert.NoError(t, err)
cc, err = parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -320,7 +321,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {

// update mon IP's and check is it updating for all clusterID's
csiCluster1ConfigEntry.Monitors = append(csiCluster1ConfigEntry.Monitors, "1.2.3.10:5000")
-s, err = updateCsiClusterConfig(s, clusterIDofCluster1, &csiCluster1ConfigEntry)
+s, err = updateCsiClusterConfig(s, clusterIDofCluster1, clusterIDofCluster1, &csiCluster1ConfigEntry)
assert.NoError(t, err)
cc, err = parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -359,11 +360,11 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
},
},
}
-s, err = updateCsiClusterConfig(s, clusterIDofCluster2, &csiCluster2ConfigEntry)
+s, err = updateCsiClusterConfig(s, clusterIDofCluster2, clusterIDofCluster2, &csiCluster2ConfigEntry)
assert.NoError(t, err)
-s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster2, &subVolCsiCluster2Config)
+s, err = updateCsiClusterConfig(s, subvolGrpNameofCluster2, clusterIDofCluster2, &subVolCsiCluster2Config)
assert.NoError(t, err)
-s, err = updateCsiClusterConfig(s, radosNSofCluster2, &radosNsCsiCluster2Config)
+s, err = updateCsiClusterConfig(s, radosNSofCluster2, clusterIDofCluster2, &radosNsCsiCluster2Config)
assert.NoError(t, err)
cc, err = parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -382,7 +383,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
// update mon on 2nd cluster and check is it updating for all clusterID's
// of 2nd cluster
csiCluster2ConfigEntry.Monitors = append(csiCluster2ConfigEntry.Monitors, "192.168.0.3:5000")
-s, err = updateCsiClusterConfig(s, clusterIDofCluster2, &csiCluster2ConfigEntry)
+s, err = updateCsiClusterConfig(s, clusterIDofCluster2, clusterIDofCluster2, &csiCluster2ConfigEntry)
assert.NoError(t, err)
cc, err = parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -404,7 +405,7 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
// clusterID's of 2nd cluster
i := 1
csiCluster2ConfigEntry.Monitors = append(csiCluster2ConfigEntry.Monitors[:i], csiCluster2ConfigEntry.Monitors[i+1:]...)
-s, err = updateCsiClusterConfig(s, clusterIDofCluster2, &csiCluster2ConfigEntry)
+s, err = updateCsiClusterConfig(s, clusterIDofCluster2, clusterIDofCluster2, &csiCluster2ConfigEntry)
assert.NoError(t, err)
cc, err = parseCsiClusterConfig(s)
assert.NoError(t, err)
@@ -423,18 +424,61 @@ func TestUpdateCsiClusterConfig(t *testing.T) {
})

t.Run("test multus cluster", func(t *testing.T) {
s, err = updateCsiClusterConfig("[]", "rook-ceph-1", &csiClusterConfigEntryMultus)
s, err = updateCsiClusterConfig("[]", "rook-ceph-1", "rook-ceph-1", &csiClusterConfigEntryMultus)
assert.NoError(t, err)
compareJSON(t, `[{"clusterID":"rook-ceph-1","monitors":["1.2.3.4:5000"],"rbd":{"netNamespaceFilePath":"/var/run/netns/rook-ceph-1","radosNamespace":"rook-ceph-1"},"namespace":"rook-ceph-1"}]`, s)
})

t.Run("test crush location labels are set", func(t *testing.T) {
s, err = updateCsiClusterConfig("[]", "rook-ceph-4", &csiClusterConfigEntry4)
s, err = updateCsiClusterConfig("[]", "rook-ceph-4", "rook-ceph-4", &csiClusterConfigEntry4)
assert.NoError(t, err)
compareJSON(t, `[{"clusterID":"rook-ceph-4","monitors":["10.1.1.1:5000"],"readAffinity": {"enabled": true, "crushLocationLabels":["kubernetes.io/hostname",
"topology.kubernetes.io/region","topology.kubernetes.io/zone","topology.rook.io/chassis","topology.rook.io/rack","topology.rook.io/row","topology.rook.io/pdu",
"topology.rook.io/pod","topology.rook.io/room","topology.rook.io/datacenter"]},"namespace":"rook-ceph-4"}]`, s)
})

t.Run("test empty namespace correction", func(t *testing.T) {
currentConfigFormatString := `[` +
`{"clusterID":"cluster-id-and-namespace","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"","subvolumeGroup":"","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"%s"},` +
`{"clusterID":"5bb69c306a7d011c3e91c3cec112fb7a","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"","subvolumeGroup":"csi","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"cluster-id-and-namespace"}` +
`]`
idAndNs := "cluster-id-and-namespace"

csiConfigEntry := &CSIClusterConfigEntry{
Namespace: idAndNs,
ClusterInfo: cephcsi.ClusterInfo{
Monitors: []string{"172.30.100.1:3300"},
},
}
currentConfigWithoutNamespace := fmt.Sprintf(currentConfigFormatString, "")
out, err := updateCsiClusterConfig(currentConfigWithoutNamespace, idAndNs, idAndNs, csiConfigEntry)
assert.NoError(t, err)
expectedOutput := fmt.Sprintf(currentConfigFormatString, idAndNs)
assert.Equal(t, expectedOutput, out)
})

t.Run("test empty namespace correction and clear netNamespaceFilePath", func(t *testing.T) {
holderEnabled = false
currentConfigFormatString := `[` +
`{"clusterID":"cluster-id-and-namespace","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"%s","subvolumeGroup":"","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"%s","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"%s"},` +
`{"clusterID":"5bb69c306a7d011c3e91c3cec112fb7a","monitors":["172.30.100.1:3300"],"cephFS":{"netNamespaceFilePath":"","subvolumeGroup":"csi","kernelMountOptions":"","fuseMountOptions":""},"rbd":{"netNamespaceFilePath":"","radosNamespace":""},"nfs":{"netNamespaceFilePath":""},"readAffinity":{"enabled":false,"crushLocationLabels":null},"namespace":"cluster-id-and-namespace"}` +
`]`
cephFsNetNsFilePath := "/var/lib/kubelet/plugins/cluster-id-and-namespace.cephfs.csi.ceph.com/cluster-id-and-namespace.net.ns"
rbdNetNsFilePath := "/var/lib/kubelet/plugins/cluster-id-and-namespace.rbd.csi.ceph.com/cluster-id-and-namespace.net.ns"
idAndNs := "cluster-id-and-namespace"

csiConfigEntry := &CSIClusterConfigEntry{
Namespace: idAndNs,
ClusterInfo: cephcsi.ClusterInfo{
Monitors: []string{"172.30.100.1:3300"},
},
}
currentConfigWithoutNamespace := fmt.Sprintf(currentConfigFormatString, cephFsNetNsFilePath, rbdNetNsFilePath, "")
out, err := updateCsiClusterConfig(currentConfigWithoutNamespace, idAndNs, idAndNs, csiConfigEntry)
assert.NoError(t, err)
expectedOutput := fmt.Sprintf(currentConfigFormatString, "", "", idAndNs)
assert.Equal(t, expectedOutput, out)
})
}

func contains(src, dest []string) bool {
3 changes: 2 additions & 1 deletion pkg/operator/ceph/csi/spec.go
@@ -923,7 +923,8 @@ func (r *ReconcileCSI) configureHolder(driver driverDetails, c ClusterDetail, tp
}

// Save the path of the network namespace file for ceph-csi to use
-err = SaveClusterConfig(r.context.Clientset, c.cluster.Namespace, c.clusterInfo, clusterConfigEntry)
+clusterId := c.cluster.Namespace // cluster ID is same as cluster namespace for CephClusters
+err = SaveClusterConfig(r.context.Clientset, clusterId, c.cluster.Namespace, c.clusterInfo, clusterConfigEntry)
if err != nil {
return errors.Wrapf(err, "failed to save cluster config for csi holder %q", driver.fullName)
}
