diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index d2d89fcad4ac..a72c33638668 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -755,41 +755,77 @@ jobs:
           yq w -i pool-test.yaml spec.mirroring.enabled true
           yq w -i pool-test.yaml spec.mirroring.mode image
           kubectl create -f pool-test.yaml
-          timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool to created" && sleep 1; done'
+          timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to be created on cluster 1" && sleep 1; done'
+
+      - name: create replicated mirrored pool 2 on cluster 1
+        run: |
+          cd cluster/examples/kubernetes/ceph/
+          yq w -i pool-test.yaml metadata.name replicapool2
+          kubectl create -f pool-test.yaml
+          timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool2 -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to be created on cluster 1" && sleep 1; done'
+          yq w -i pool-test.yaml metadata.name replicapool

       - name: create replicated mirrored pool on cluster 2
         run: |
           cd cluster/examples/kubernetes/ceph/
           yq w -i pool-test.yaml metadata.namespace rook-ceph-secondary
           kubectl create -f pool-test.yaml
-          timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool to created" && sleep 1; done'
+          timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to be created on cluster 2" && sleep 1; done'
+
+      - name: create replicated mirrored pool 2 on cluster 2
+        run: |
+          cd cluster/examples/kubernetes/ceph/
+          yq w -i pool-test.yaml metadata.name replicapool2
+          kubectl create -f pool-test.yaml
+          timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to be created on cluster 2" && sleep 1; done'

-      - name: create image in the pool
+      - name: create images in the pools
         run: |
           kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool create test -s 1G
           kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool/test snapshot
           kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test
+          kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool2 create test -s 1G
+          kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool2/test snapshot
+          kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test

-      - name: copy block mirror peer secret into the other cluster
+      - name: copy block mirror peer secret into the other cluster for replicapool
         run: |
           kubectl -n rook-ceph get secret pool-peer-token-replicapool -o yaml |\
           sed 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g; s/name: pool-peer-token-replicapool/name: pool-peer-token-replicapool-config/g' |\
           kubectl create --namespace=rook-ceph-secondary -f -

-      - name: add block mirror peer secret to the other cluster
+      - name: copy block mirror peer secret into the other cluster for replicapool2 (using cluster global peer)
+        run: |
+          kubectl -n rook-ceph get secret cluster-peer-token-my-cluster -o yaml |\
+          sed 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g; s/name: cluster-peer-token-my-cluster/name: cluster-peer-token-my-cluster-config/g' |\
+          kubectl create --namespace=rook-ceph-secondary -f -
+
+      - name: add block mirror peer secret to the other cluster for replicapool
         run: |
           kubectl -n rook-ceph-secondary patch cephblockpool replicapool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}}'

-      - name: verify image has been mirrored
+      - name: add block mirror peer secret to the other cluster for replicapool2 (using cluster global peer)
+        run: |
+          kubectl -n rook-ceph-secondary patch cephblockpool replicapool2 --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["cluster-peer-token-my-cluster-config"]}}}}'
+
+      - name: verify image has been mirrored for replicapool
+        run: |
+          # let's wait a bit for the image to be present
+          timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool" && sleep 1; done'
+
+      - name: verify image has been mirrored for replicapool2
         run: |
           # let's wait a bit for the image to be present
-          timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored" && sleep 1; done'
+          timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool2 ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool2" && sleep 1; done'

       - name: display cephblockpool and image status
         run: |
-          timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated" && sleep 1; done'
-          kubectl -n rook-ceph-secondary get cephblockpool -o yaml
+          timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool" && sleep 1; done'
+          timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool2" && sleep 1; done'
+          kubectl -n rook-ceph-secondary get cephblockpool replicapool -o yaml
+          kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o yaml
           kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test
+          kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test

       - name: create replicated mirrored filesystem on cluster 1
         run: |
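The workflow copies the cluster-wide peer secret between namespaces with a `kubectl | sed | kubectl` pipeline. For readers who prefer to see the same step programmatically, here is a minimal client-go sketch; `copyPeerSecret` is a hypothetical helper and not part of this patch:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// copyPeerSecret mirrors what the workflow does with kubectl|sed|kubectl:
// fetch the peer token Secret from the source namespace and recreate it
// under a new name in the destination namespace.
func copyPeerSecret(ctx context.Context, c kubernetes.Interface, srcNS, dstNS, srcName, dstName string) error {
	src, err := c.CoreV1().Secrets(srcNS).Get(ctx, srcName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	dst := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: dstName, Namespace: dstNS},
		Type:       src.Type,
		Data:       src.Data, // the base64-encoded bootstrap token lives under the "token" key
	}
	_, err = c.CoreV1().Secrets(dstNS).Create(ctx, dst, metav1.CreateOptions{})
	return err
}
```

For the cluster global peer above, this would be called as `copyPeerSecret(ctx, clientset, "rook-ceph", "rook-ceph-secondary", "cluster-peer-token-my-cluster", "cluster-peer-token-my-cluster-config")`.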
diff --git a/pkg/daemon/ceph/client/mirror.go b/pkg/daemon/ceph/client/mirror.go
index 290f875a625f..bdc6ea01a3d6 100644
--- a/pkg/daemon/ceph/client/mirror.go
+++ b/pkg/daemon/ceph/client/mirror.go
@@ -17,14 +17,33 @@ limitations under the License.
 package client

 import (
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"os"
+	"strings"

 	"github.com/pkg/errors"
 	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	"github.com/rook/rook/pkg/clusterd"
+	"github.com/rook/rook/pkg/util"
+)
+
+// PeerToken is the content of the peer token
+type PeerToken struct {
+	ClusterFSID string `json:"fsid"`
+	ClientID    string `json:"client_id"`
+	Key         string `json:"key"`
+	MonHost     string `json:"mon_host"`
+	// These fields are added by Rook and NOT part of the output of client.CreateRBDMirrorBootstrapPeer()
+	PoolID    int    `json:"pool_id"`
+	Namespace string `json:"namespace"`
+}
+
+var (
+	rbdMirrorPeerCaps      = []string{"mon", "profile rbd-mirror-peer", "osd", "profile rbd"}
+	rbdMirrorPeerKeyringID = "rbd-mirror-peer"
 )

 // ImportRBDMirrorBootstrapPeer add a mirror peer in the rbd-mirror configuration
@@ -32,6 +51,7 @@ func ImportRBDMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *Cluste
 	logger.Infof("add rbd-mirror bootstrap peer token for pool %q", poolName)

 	// Token file
+	// TODO: use mktemp?
 	tokenFilePath := fmt.Sprintf("/tmp/rbd-mirror-token-%s", poolName)

 	// Write token into a file
@@ -307,3 +327,58 @@ func ListSnapshotSchedulesRecursively(context *clusterd.Context, clusterInfo *Cl
 	logger.Debugf("successfully recursively listed snapshot schedules for pool %q", poolName)
 	return snapshotSchedulesRecursive, nil
 }
+
+/* CreateRBDMirrorBootstrapPeerWithoutPool creates a bootstrap peer for the current cluster.
+It creates the cephx user for the remote cluster to use with all the necessary details.
+This function is handy in scenarios where no pools have been created yet but replication communication is required (connecting peers).
+It essentially sits above CreateRBDMirrorBootstrapPeer()
+and is a cluster-wide option in the scenario where all the pools will be mirrored to the same remote cluster.
+
+So the scenario looks like:
+
+	1) Create the cephx ID on the source cluster
+
+	2) Enable a source pool for mirroring - at any time, we just don't know when
+	   rbd --cluster site-a mirror pool enable image-pool image
+
+	3) Copy the key details over to the other cluster (non-ceph workflow)
+
+	4) Enable destination pool for mirroring
+	   rbd --cluster site-b mirror pool enable image-pool image
+
+	5) Add the peer details to the destination pool
+
+	6) Repeat the steps flipping source and destination to enable
+	   bi-directional mirroring
+*/
+func CreateRBDMirrorBootstrapPeerWithoutPool(context *clusterd.Context, clusterInfo *ClusterInfo) ([]byte, error) {
+	fullClientName := getQualifiedUser(rbdMirrorPeerKeyringID)
+	logger.Infof("create rbd-mirror bootstrap peer token %q", fullClientName)
+	key, err := AuthGetOrCreateKey(context, clusterInfo, fullClientName, rbdMirrorPeerCaps)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to create rbd-mirror peer key %q", fullClientName)
+	}
+	logger.Infof("successfully created rbd-mirror bootstrap peer token for cluster %q", clusterInfo.NamespacedName().Name)
+
+	mons := util.NewSet()
+	for _, mon := range clusterInfo.Monitors {
+		mons.Add(mon.Endpoint)
+	}
+
+	peerToken := PeerToken{
+		ClusterFSID: clusterInfo.FSID,
+		ClientID:    rbdMirrorPeerKeyringID,
+		Key:         key,
+		MonHost:     strings.Join(mons.ToSlice(), ","),
+		Namespace:   clusterInfo.Namespace,
+	}
+
+	// Marshal the Go type back to JSON
+	decodedTokenBackToJSON, err := json.Marshal(peerToken)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to encode peer token to json")
+	}
+
+	// Return the base64 encoded token
+	return []byte(base64.StdEncoding.EncodeToString(decodedTokenBackToJSON)), nil
+}
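The token returned by `CreateRBDMirrorBootstrapPeerWithoutPool` is base64-encoded JSON. A minimal sketch of the reverse operation, useful when inspecting a generated secret by hand; `decodePeerToken` is illustrative only and duplicates the `PeerToken` type above so it stands alone:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// PeerToken matches the struct added in mirror.go above.
type PeerToken struct {
	ClusterFSID string `json:"fsid"`
	ClientID    string `json:"client_id"`
	Key         string `json:"key"`
	MonHost     string `json:"mon_host"`
	PoolID      int    `json:"pool_id"`
	Namespace   string `json:"namespace"`
}

// decodePeerToken reverses CreateRBDMirrorBootstrapPeerWithoutPool:
// base64-decode the secret payload, then unmarshal the JSON document.
func decodePeerToken(encoded []byte) (*PeerToken, error) {
	raw, err := base64.StdEncoding.DecodeString(string(encoded))
	if err != nil {
		return nil, fmt.Errorf("failed to decode token: %w", err)
	}
	token := &PeerToken{}
	if err := json.Unmarshal(raw, token); err != nil {
		return nil, fmt.Errorf("failed to unmarshal token: %w", err)
	}
	return token, nil
}
```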
diff --git a/pkg/daemon/ceph/client/test/info.go b/pkg/daemon/ceph/client/test/info.go
index efb03c956ba1..6efd21917de3 100644
--- a/pkg/daemon/ceph/client/test/info.go
+++ b/pkg/daemon/ceph/client/test/info.go
@@ -62,5 +62,6 @@ func CreateTestClusterInfo(monCount int) *client.ClusterInfo {
 			Endpoint: fmt.Sprintf("1.2.3.%d:6789", (i + 1)),
 		}
 	}
+	c.SetName(c.Namespace)
 	return c
 }
diff --git a/pkg/operator/ceph/cluster/cluster.go b/pkg/operator/ceph/cluster/cluster.go
index 0f14b52ac97e..c6a72069fab2 100755
--- a/pkg/operator/ceph/cluster/cluster.go
+++ b/pkg/operator/ceph/cluster/cluster.go
@@ -214,6 +214,7 @@ func (c *ClusterController) initializeCluster(cluster *cluster) error {

 	// Populate ClusterInfo with the last value
 	cluster.mons.ClusterInfo = cluster.ClusterInfo
+	cluster.mons.ClusterInfo.SetName(c.namespacedName.Name)

 	// Start the monitoring if not already started
 	c.configureCephMonitoring(cluster, cluster.ClusterInfo)
@@ -572,5 +573,11 @@ func (c *cluster) postMonStartupActions() error {
 		}
 	}

+	// Create cluster-wide RBD bootstrap peer token
+	_, err = controller.CreateBootstrapPeerSecret(c.context, c.ClusterInfo, &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: c.namespacedName.Name, Namespace: c.Namespace}}, c.ownerInfo)
+	if err != nil {
+		return errors.Wrap(err, "failed to create cluster rbd bootstrap peer token")
+	}
+
 	return nil
 }
diff --git a/pkg/operator/ceph/cluster/controller.go b/pkg/operator/ceph/cluster/controller.go
index 528ccd1023a6..bf41cc1f84da 100644
--- a/pkg/operator/ceph/cluster/controller.go
+++ b/pkg/operator/ceph/cluster/controller.go
@@ -367,6 +367,7 @@ func (c *ClusterController) reconcileCephCluster(clusterObj *cephv1.CephCluster,
 		// It's a new cluster so let's populate the struct
 		cluster = newCluster(clusterObj, c.context, c.csiConfigMutex, ownerInfo)
 	}
+	cluster.namespacedName = c.namespacedName

 	// Pass down the client to interact with Kubernetes objects
 	// This will be used later down by spec code to create objects like deployment, services etc
diff --git a/pkg/operator/ceph/cluster/mon/health.go b/pkg/operator/ceph/cluster/mon/health.go
index c817ecb4a1ff..3cc774da251e 100644
--- a/pkg/operator/ceph/cluster/mon/health.go
+++ b/pkg/operator/ceph/cluster/mon/health.go
@@ -23,6 +23,7 @@ import (
 	"time"

 	"github.com/pkg/errors"
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
 	cephutil "github.com/rook/rook/pkg/daemon/ceph/util"
 	"github.com/rook/rook/pkg/operator/ceph/controller"
@@ -497,6 +498,12 @@ func (c *Cluster) removeMon(daemonName string) error {
 		return errors.Wrapf(err, "failed to save mon config after failing over mon %s", daemonName)
 	}

+	// Update cluster-wide RBD bootstrap peer token since Monitors have changed
+	_, err := controller.CreateBootstrapPeerSecret(c.context, c.ClusterInfo, &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: c.ClusterInfo.NamespacedName().Name, Namespace: c.Namespace}}, c.ownerInfo)
+	if err != nil {
+		return errors.Wrap(err, "failed to update cluster rbd bootstrap peer token")
+	}
+
 	return nil
 }
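The `removeMon` hook above re-publishes the secret because the token pins `mon_host` at creation time; once a monitor is removed, a previously shared token can point a peer at a dead endpoint. A hedged sketch of the staleness condition this guards against (`tokenIsStale` is hypothetical, not part of the patch):

```go
package main

import "strings"

// tokenIsStale reports whether the mon_host list embedded in a previously
// published peer token still matches the cluster's current mon endpoints.
// If any embedded endpoint is gone, the token must be regenerated, which is
// what removeMon now does via CreateBootstrapPeerSecret.
func tokenIsStale(tokenMonHost string, currentEndpoints []string) bool {
	current := make(map[string]bool, len(currentEndpoints))
	for _, ep := range currentEndpoints {
		current[ep] = true
	}
	for _, ep := range strings.Split(tokenMonHost, ",") {
		if !current[ep] {
			return true // an endpoint baked into the token no longer exists
		}
	}
	return false
}
```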
diff --git a/pkg/operator/ceph/cluster/mon/health_test.go b/pkg/operator/ceph/cluster/mon/health_test.go
index 77b4a1e21ec7..1ed45b1708e5 100644
--- a/pkg/operator/ceph/cluster/mon/health_test.go
+++ b/pkg/operator/ceph/cluster/mon/health_test.go
@@ -49,6 +49,10 @@ func TestCheckHealth(t *testing.T) {

 	executor := &exectest.MockExecutor{
 		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+			logger.Infof("executing command: %s %+v", command, args)
+			if args[0] == "auth" && args[1] == "get-or-create-key" {
+				return "{\"key\":\"mysecurekey\"}", nil
+			}
 			return clienttest.MonInQuorumResponse(), nil
 		},
 	}
@@ -154,7 +158,13 @@ func TestEvictMonOnSameNode(t *testing.T) {
 	clientset := test.New(t, 1)
 	configDir, _ := ioutil.TempDir("", "")
 	defer os.RemoveAll(configDir)
-	context := &clusterd.Context{Clientset: clientset, ConfigDir: configDir, Executor: &exectest.MockExecutor{}, RequestCancelOrchestration: abool.New()}
+	executor := &exectest.MockExecutor{
+		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+			logger.Infof("executing command: %s %+v", command, args)
+			return "{\"key\":\"mysecurekey\"}", nil
+		},
+	}
+	context := &clusterd.Context{Clientset: clientset, ConfigDir: configDir, Executor: executor, RequestCancelOrchestration: abool.New()}
 	ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef()
 	c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{})
 	setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 0}, "myversion")
@@ -245,6 +255,10 @@ func TestCheckHealthNotFound(t *testing.T) {

 	executor := &exectest.MockExecutor{
 		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+			logger.Infof("executing command: %s %+v", command, args)
+			if args[0] == "auth" && args[1] == "get-or-create-key" {
+				return "{\"key\":\"mysecurekey\"}", nil
+			}
 			return clienttest.MonInQuorumResponse(), nil
 		},
 	}
@@ -304,6 +318,10 @@ func TestAddRemoveMons(t *testing.T) {
 	monQuorumResponse := clienttest.MonInQuorumResponse()
 	executor := &exectest.MockExecutor{
 		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+			logger.Infof("executing command: %s %+v", command, args)
+			if args[0] == "auth" && args[1] == "get-or-create-key" {
+				return "{\"key\":\"mysecurekey\"}", nil
+			}
 			return monQuorumResponse, nil
 		},
 	}
diff --git a/pkg/operator/ceph/controller/mirror_peer.go b/pkg/operator/ceph/controller/mirror_peer.go
index 4b62e0d2b12c..c33e7b798bd0 100644
--- a/pkg/operator/ceph/controller/mirror_peer.go
+++ b/pkg/operator/ceph/controller/mirror_peer.go
@@ -18,7 +18,8 @@ limitations under the License.
 package controller

 import (
-	"context"
+	"encoding/base64"
+	"encoding/json"
 	"fmt"

 	"github.com/pkg/errors"
@@ -29,10 +30,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )

@@ -41,25 +39,15 @@ const (
 	poolMirrorBoostrapPeerSecretName = "pool-peer-token"
 	// #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name
 	fsMirrorBoostrapPeerSecretName = "fs-peer-token"
+	// #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name
+	clusterMirrorBoostrapPeerSecretName = "cluster-peer-token"
 	// RBDMirrorBootstrapPeerSecretName #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name
 	RBDMirrorBootstrapPeerSecretName = "rbdMirrorBootstrapPeerSecretName"
 	// FSMirrorBootstrapPeerSecretName #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name
 	FSMirrorBootstrapPeerSecretName = "fsMirrorBootstrapPeerSecretName"
 )

-// PeerToken is the content of the peer token
-type PeerToken struct {
-	ClusterFSID string `json:"fsid"`
-	ClientID    string `json:"client_id"`
-	Key         string `json:"key"`
-	MonHost     string `json:"mon_host"`
-	// These fields are added by Rook and NOT part of the output of client.CreateRBDMirrorBootstrapPeer()
-	PoolID    int    `json:"pool_id"`
-	Namespace string `json:"namespace"`
-}
-
-func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, object client.Object, namespacedName types.NamespacedName, scheme *runtime.Scheme) (reconcile.Result, error) {
-	context := context.TODO()
+func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, object client.Object, ownerInfo *k8sutil.OwnerInfo) (reconcile.Result, error) {
 	var err error
 	var ns, name, daemonType string
 	var boostrapToken []byte
@@ -80,6 +68,22 @@ func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.Cl
 			return ImmediateRetryResult, errors.Wrap(err, "failed to add extra information to rbd-mirror bootstrap peer")
 		}

+	case *cephv1.CephCluster:
+		ns = objectType.Namespace
+		name = "" // We pass an empty name because this is not a pool
+		daemonType = "cluster-rbd"
+		// Create rbd mirror bootstrap peer token
+		boostrapToken, err = cephclient.CreateRBDMirrorBootstrapPeerWithoutPool(ctx, clusterInfo)
+		if err != nil {
+			return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType)
+		}
+
+		// Add additional information to the peer token
+		boostrapToken, err = expandBootstrapPeerToken(ctx, clusterInfo, name, boostrapToken)
+		if err != nil {
+			return ImmediateRetryResult, errors.Wrap(err, "failed to add extra information to rbd-mirror bootstrap peer")
+		}
+
 	case *cephv1.CephFilesystem:
 		ns = objectType.Namespace
 		name = objectType.Name
@@ -88,6 +92,7 @@ func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.Cl
 		if err != nil {
 			return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType)
 		}
+
 	default:
 		return ImmediateRetryResult, errors.Wrap(err, "failed to create bootstrap peer unknown daemon type")
 	}
@@ -96,14 +101,14 @@ func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.Cl
 	s := GenerateBootstrapPeerSecret(object, boostrapToken)

 	// set ownerref to the Secret
-	err = controllerutil.SetControllerReference(object, s, scheme)
+	err = ownerInfo.SetControllerReference(s)
 	if err != nil {
 		return ImmediateRetryResult, errors.Wrapf(err, "failed to set owner reference for %s-mirror bootstrap peer secret %q", daemonType, s.Name)
 	}

 	// Create Secret
-	logger.Debugf("store %s-mirror bootstrap token in a Kubernetes Secret %q", daemonType, s.Name)
-	_, err = ctx.Clientset.CoreV1().Secrets(ns).Create(context, s, metav1.CreateOptions{})
+	logger.Debugf("store %s-mirror bootstrap token in a Kubernetes Secret %q in namespace %q", daemonType, s.Name, ns)
+	_, err = k8sutil.CreateOrUpdateSecret(ctx.Clientset, s)
 	if err != nil && !kerrors.IsAlreadyExists(err) {
 		return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer %q secret", daemonType, s.Name)
 	}
@@ -124,6 +129,10 @@ func GenerateBootstrapPeerSecret(object client.Object, token []byte) *v1.Secret
 		entityType = "pool"
 		entityName = objectType.Name
 		entityNamespace = objectType.Namespace
+	case *cephv1.CephCluster:
+		entityType = "cluster"
+		entityName = objectType.Name
+		entityNamespace = objectType.Namespace
 	}

 	s := &v1.Secret{
@@ -147,6 +156,8 @@ func buildBoostrapPeerSecretName(object client.Object) string {
 		return fmt.Sprintf("%s-%s", fsMirrorBoostrapPeerSecretName, objectType.Name)
 	case *cephv1.CephBlockPool:
 		return fmt.Sprintf("%s-%s", poolMirrorBoostrapPeerSecretName, objectType.Name)
+	case *cephv1.CephCluster:
+		return fmt.Sprintf("%s-%s", clusterMirrorBoostrapPeerSecretName, objectType.Name)
 	}

 	return ""
@@ -173,7 +184,7 @@ func ValidatePeerToken(object client.Object, data map[string][]byte) error {
 	// Lookup Secret keys and content
 	keysToTest := []string{"token"}
 	switch object.(type) {
-	case *cephv1.CephBlockPool:
+	case *cephv1.CephRBDMirror:
 		keysToTest = append(keysToTest, "pool")
 	}
@@ -202,13 +213,16 @@ func expandBootstrapPeerToken(ctx *clusterd.Context, clusterInfo *cephclient.Clu
 	}

 	// Fetch the pool ID
-	poolDetails, err := cephclient.GetPoolDetails(ctx, clusterInfo, poolName)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get pool %q details", poolName)
+	if poolName != "" {
+		poolDetails, err := cephclient.GetPoolDetails(ctx, clusterInfo, poolName)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get pool %q details", poolName)
+		}
+
+		// Add extra details to the token
+		decodedTokenToGo.PoolID = poolDetails.Number
 	}

-	// Add extra details to the token
-	decodedTokenToGo.PoolID = poolDetails.Number
 	decodedTokenToGo.Namespace = clusterInfo.Namespace

 	// Marshal the Go type back to JSON
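For the `CephCluster` case, `expandBootstrapPeerToken` is called with an empty pool name, so the `pool_id` field keeps its zero value and only the namespace is stamped in. A standalone sketch of that cluster-wide branch (`expandClusterToken` is illustrative, not part of the patch; it uses a generic map so it compiles on its own):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
)

// expandClusterToken mimics expandBootstrapPeerToken's poolName == "" branch:
// decode the base64 token, stamp only the namespace, and re-encode.
// pool_id is deliberately left untouched because a cluster-wide token
// is not tied to any pool.
func expandClusterToken(encoded []byte, namespace string) ([]byte, error) {
	raw, err := base64.StdEncoding.DecodeString(string(encoded))
	if err != nil {
		return nil, err
	}
	token := map[string]interface{}{}
	if err := json.Unmarshal(raw, &token); err != nil {
		return nil, err
	}
	token["namespace"] = namespace
	out, err := json.Marshal(token)
	if err != nil {
		return nil, err
	}
	return []byte(base64.StdEncoding.EncodeToString(out)), nil
}
```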
diff --git a/pkg/operator/ceph/controller/mirror_peer_test.go b/pkg/operator/ceph/controller/mirror_peer_test.go
index 13825750276f..b12f704970f7 100644
--- a/pkg/operator/ceph/controller/mirror_peer_test.go
+++ b/pkg/operator/ceph/controller/mirror_peer_test.go
@@ -33,7 +33,7 @@ import (

 func TestValidatePeerToken(t *testing.T) {
 	// Error: map is empty
-	b := &cephv1.CephBlockPool{}
+	b := &cephv1.CephRBDMirror{}
 	data := map[string][]byte{}
 	err := ValidatePeerToken(b, data)
 	assert.Error(t, err)
@@ -48,13 +48,18 @@ func TestValidatePeerToken(t *testing.T) {
 	err = ValidatePeerToken(b, data)
 	assert.Error(t, err)

-	// Success CephBlockPool
+	// Success CephRBDMirror
 	data["pool"] = []byte("foo")
 	err = ValidatePeerToken(b, data)
 	assert.NoError(t, err)

 	// Success CephFilesystem
-	data["pool"] = []byte("foo")
+	// "pool" is not required here
+	delete(data, "pool")
+	err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data)
+	assert.NoError(t, err)
+
+	// Success CephFilesystem
 	err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data)
 	assert.NoError(t, err)
 }
"pool") + err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data) + assert.NoError(t, err) + + // Success CephFilesystem err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data) assert.NoError(t, err) } diff --git a/pkg/operator/ceph/file/controller.go b/pkg/operator/ceph/file/controller.go index 406de8527456..ed6db4e486a8 100644 --- a/pkg/operator/ceph/file/controller.go +++ b/pkg/operator/ceph/file/controller.go @@ -308,7 +308,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil // Always create a bootstrap peer token in case another cluster wants to add us as a peer logger.Info("reconciling create cephfs-mirror peer configuration") - reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, r.clusterInfo, cephFilesystem, request.NamespacedName, r.scheme) + reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, r.clusterInfo, cephFilesystem, k8sutil.NewOwnerInfo(cephFilesystem, r.scheme)) if err != nil { updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) return reconcileResponse, errors.Wrapf(err, "failed to create cephfs-mirror bootstrap peer for filesystem %q.", cephFilesystem.Name) diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go index 98aaf6117074..7a238b7f5c84 100644 --- a/pkg/operator/ceph/pool/controller.go +++ b/pkg/operator/ceph/pool/controller.go @@ -32,6 +32,7 @@ import ( "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" "github.com/rook/rook/pkg/operator/ceph/cluster/mon" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" + "github.com/rook/rook/pkg/operator/k8sutil" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -282,7 +283,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile logger.Debug("reconciling create rbd mirror peer configuration") if cephBlockPool.Spec.Mirroring.Enabled { // Always create a bootstrap peer token in case another cluster wants to add us as a peer - reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, clusterInfo, cephBlockPool, request.NamespacedName, r.scheme) + reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, clusterInfo, cephBlockPool, k8sutil.NewOwnerInfo(cephBlockPool, r.scheme)) if err != nil { updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) return reconcileResponse, errors.Wrapf(err, "failed to create rbd-mirror bootstrap peer for pool %q.", cephBlockPool.GetName()) diff --git a/pkg/operator/ceph/pool/peers.go b/pkg/operator/ceph/pool/peers.go index 79c34d268875..496a4bcca65e 100644 --- a/pkg/operator/ceph/pool/peers.go +++ b/pkg/operator/ceph/pool/peers.go @@ -52,7 +52,7 @@ func (r *ReconcileCephBlockPool) reconcileAddBoostrapPeer(pool *cephv1.CephBlock } // Import bootstrap peer - err = client.ImportRBDMirrorBootstrapPeer(r.context, r.clusterInfo, string(s.Data["pool"]), string(s.Data["direction"]), s.Data["token"]) + err = client.ImportRBDMirrorBootstrapPeer(r.context, r.clusterInfo, pool.Name, string(s.Data["direction"]), s.Data["token"]) if err != nil { return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to import bootstrap peer token") }