From ac299d43cce890187bec9fb7b28941e979581479 Mon Sep 17 00:00:00 2001
From: shubham
Date: Fri, 15 Jan 2021 18:57:45 +0530
Subject: [PATCH 1/2] fix(migrate): update lock file path when csp is scaled down

Signed-off-by: shubham
---
 docs/migration.md         |  3 +++
 pkg/migrate/cstor/pool.go | 40 +++++++++++++++++++++++++--------------
 2 files changed, 29 insertions(+), 14 deletions(-)

diff --git a/docs/migration.md b/docs/migration.md
index 08479011..20bdd211 100644
--- a/docs/migration.md
+++ b/docs/migration.md
@@ -125,6 +125,9 @@ I0520 10:01:25.674512       1 pool.go:395] Updating cvr pvc-9cf5a405-12c0-4522-b
 I0520 10:01:25.798889       1 pool.go:80] Successfully migrated spc sparse-claim to cspc
 ```
 
+**Note: If the job fails for any reason, do not scale up the old CSP deployments; doing so can lead to data corruption.**
+
+
 ## cStor External Provisioned volumes to cStor CSI volumes
 
 These instructions will guide you through the process of migrating cStor volumes from the old v1apha1 external provisioned spec to v1 CSI spec.
diff --git a/pkg/migrate/cstor/pool.go b/pkg/migrate/cstor/pool.go
index cb5a6e6b..52e9c491 100644
--- a/pkg/migrate/cstor/pool.go
+++ b/pkg/migrate/cstor/pool.go
@@ -41,11 +41,6 @@ import (
 )
 
 const (
-	replicaPatch = `{
-	"spec": {
-		"replicas": 0
-	}
-}`
 	cspNameLabel          = "cstorpool.openebs.io/name"
 	cspUIDLabel           = "cstorpool.openebs.io/uid"
 	cspHostnameAnnotation = "cstorpool.openebs.io/hostname"
@@ -398,7 +393,7 @@ func (c *CSPCMigrator) cspTocspi(cspiObj *cstor.CStorPoolInstance) error {
 	}
 	if cspiObj.Annotations[types.OpenEBSDisableReconcileLabelKey] != "" {
 		klog.Infof("Migrating csp %s to cspi %s", cspObj.Name, cspiObj.Name)
-		err = c.scaleDownDeployment(cspObj, c.OpenebsNamespace)
+		err = c.scaleDownDeployment(cspObj.Name, cspiObj.Name, c.OpenebsNamespace)
 		if err != nil {
 			return err
 		}
@@ -477,24 +472,41 @@ func getCSP(cspLabel string) (*apis.CStorPool, error) {
 
 // The old pool pod should be scaled down before the new cspi pod reconcile is
 // enabled to avoid importing the pool at two places at the same time.
-func (c *CSPCMigrator) scaleDownDeployment(cspObj *apis.CStorPool, openebsNamespace string) error {
-	klog.Infof("Scaling down deployment %s", cspObj.Name)
+func (c *CSPCMigrator) scaleDownDeployment(cspName, cspiName, openebsNamespace string) error {
+	var zero int32 = 0
+	klog.Infof("Scaling down deployment %s", cspName)
 	cspDeployList, err := c.KubeClientset.AppsV1().
 		Deployments(openebsNamespace).List(
 			metav1.ListOptions{
-				LabelSelector: "openebs.io/cstor-pool=" + cspObj.Name,
+				LabelSelector: "openebs.io/cstor-pool=" + cspName,
 			})
 	if err != nil {
 		return err
 	}
 	if len(cspDeployList.Items) != 1 {
-		return errors.Errorf("invalid number of csp deployment found for %s: expected 1, got %d", cspObj.Name, len(cspDeployList.Items))
+		return errors.Errorf("invalid number of csp deployments found for %s: expected 1, got %d", cspName, len(cspDeployList.Items))
+	}
+	newCSPDeploy := cspDeployList.Items[0]
+	cspiDeploy, err := c.KubeClientset.AppsV1().
+		Deployments(openebsNamespace).Get(cspiName, metav1.GetOptions{})
+	if err != nil {
+		return errors.Wrapf(err, "failed to get deployment for cspi %s", cspiName)
+	}
+	// While scaling down the csp deployment, update its volumes
+	// as well so that the zrepl.lock file used by the csp and
+	// the cspi becomes the same, avoiding data corruption due
+	// to multiple imports at the same time.
+	newCSPDeploy.Spec.Replicas = &zero
+	newCSPDeploy.Spec.Template.Spec.Volumes = cspiDeploy.Spec.Template.Spec.Volumes
+	patchData, err := GetPatchData(cspDeployList.Items[0], newCSPDeploy)
+	if err != nil {
+		return errors.Wrapf(err, "failed to scale down deployment for csp %s", cspName)
 	}
 	_, err = c.KubeClientset.AppsV1().Deployments(openebsNamespace).
 		Patch(
 			cspDeployList.Items[0].Name,
 			k8stypes.StrategicMergePatchType,
-			[]byte(replicaPatch),
+			patchData,
 		)
 	if err != nil {
 		return err
@@ -503,15 +515,15 @@ func (c *CSPCMigrator) scaleDownDeployment(cspObj *apis.CStorPool, openebsNamesp
 		cspPods, err1 := c.KubeClientset.CoreV1().
 			Pods(openebsNamespace).
 			List(metav1.ListOptions{
-				LabelSelector: "openebs.io/cstor-pool=" + cspObj.Name,
+				LabelSelector: "openebs.io/cstor-pool=" + cspName,
 			})
 		if err1 != nil {
-			klog.Errorf("failed to list pods for csp %s deployment: %s", cspObj.Name, err1.Error())
+			klog.Errorf("failed to list pods for csp %s deployment: %s", cspName, err1.Error())
 		} else {
 			if len(cspPods.Items) == 0 {
 				break
 			}
-			klog.Infof("waiting for csp %s deployment to scale down", cspObj.Name)
+			klog.Infof("waiting for csp %s deployment to scale down", cspName)
 		}
 		time.Sleep(10 * time.Second)
 	}

From 8ee2623d10f3e06140a28f2c861128b6f7082d34 Mon Sep 17 00:00:00 2001
From: Shubham Bajpai
Date: Mon, 18 Jan 2021 14:45:56 +0530
Subject: [PATCH 2/2] Apply suggestions from code review

Signed-off-by: shubham <shubham.bajpai@mayadata.io>
Co-authored-by: sai chaithanya
---
 pkg/migrate/cstor/pool.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/migrate/cstor/pool.go b/pkg/migrate/cstor/pool.go
index 52e9c491..f5d599e9 100644
--- a/pkg/migrate/cstor/pool.go
+++ b/pkg/migrate/cstor/pool.go
@@ -474,7 +474,7 @@ func getCSP(cspLabel string) (*apis.CStorPool, error) {
 // enabled to avoid importing the pool at two places at the same time.
 func (c *CSPCMigrator) scaleDownDeployment(cspName, cspiName, openebsNamespace string) error {
 	var zero int32 = 0
-	klog.Infof("Scaling down deployment %s", cspName)
+	klog.Infof("Scaling down csp deployment %s", cspName)
 	cspDeployList, err := c.KubeClientset.AppsV1().
 		Deployments(openebsNamespace).List(
 			metav1.ListOptions{
@@ -500,7 +500,7 @@ func (c *CSPCMigrator) scaleDownDeployment(cspName, cspiName, openebsNamespace s
 	newCSPDeploy.Spec.Template.Spec.Volumes = cspiDeploy.Spec.Template.Spec.Volumes
 	patchData, err := GetPatchData(cspDeployList.Items[0], newCSPDeploy)
 	if err != nil {
-		return errors.Wrapf(err, "failed to scale down deployment for csp %s", cspName)
+		return errors.Wrapf(err, "failed to patch data for csp %s", cspName)
 	}
 	_, err = c.KubeClientset.AppsV1().Deployments(openebsNamespace).
 		Patch(
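
A note on GetPatchData: the first patch calls this helper but its body is not part of the diff. The sketch below is an assumption of how such a helper could be built on client-go's strategicpatch package, not the repository's actual implementation; it shows why a single strategic-merge patch can carry both the replica count and the volume changes.

```go
package main

import (
	"encoding/json"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// GetPatchData is a hypothetical stand-in for the helper used in the
// patch above: it serializes the old and new objects and computes a
// strategic-merge patch containing only the fields that differ.
func GetPatchData(oldObj, newObj interface{}) ([]byte, error) {
	oldData, err := json.Marshal(oldObj)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal old object: %v", err)
	}
	newData, err := json.Marshal(newObj)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal new object: %v", err)
	}
	// The Deployment schema tells the differ how list fields such as
	// pod volumes are keyed, so they are merged rather than replaced.
	return strategicpatch.CreateTwoWayMergePatch(oldData, newData, appsv1.Deployment{})
}

func main() {
	var zero int32 = 0
	oldDeploy := appsv1.Deployment{}
	newDeploy := oldDeploy
	newDeploy.Spec.Replicas = &zero // scale down, as scaleDownDeployment does

	patch, err := GetPatchData(oldDeploy, newDeploy)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"spec":{"replicas":0}}
}
```

Run against a Deployment where only .spec.replicas changes, the output reduces to {"spec":{"replicas":0}}, i.e. the replicaPatch constant this PR deletes. Because scaleDownDeployment also copies the cspi volumes into newCSPDeploy before diffing, the volume swap folds into the same patch, which is what keeps the zrepl.lock path consistent between the csp and cspi pods.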