fix(migrate): update lock file path when csp is scaled down #85

Merged 2 commits on Jan 19, 2021
3 changes: 3 additions & 0 deletions docs/migration.md
@@ -125,6 +125,9 @@ I0520 10:01:25.674512 1 pool.go:395] Updating cvr pvc-9cf5a405-12c0-4522-b
I0520 10:01:25.798889 1 pool.go:80] Successfully migrated spc sparse-claim to cspc
```

**<span style="color: red;">Note: If the job fails for any reason, do not scale up the old CSP deployments; doing so can lead to data corruption.</span>**


## cStor External Provisioned volumes to cStor CSI volumes

These instructions will guide you through the process of migrating cStor volumes from the old v1alpha1 externally provisioned spec to the v1 CSI spec.
40 changes: 26 additions & 14 deletions pkg/migrate/cstor/pool.go
@@ -41,11 +41,6 @@ import (
)

const (
replicaPatch = `{
"spec": {
"replicas": 0
}
}`
cspNameLabel = "cstorpool.openebs.io/name"
cspUIDLabel = "cstorpool.openebs.io/uid"
cspHostnameAnnotation = "cstorpool.openebs.io/hostname"
@@ -398,7 +393,7 @@ func (c *CSPCMigrator) cspTocspi(cspiObj *cstor.CStorPoolInstance) error {
}
if cspiObj.Annotations[types.OpenEBSDisableReconcileLabelKey] != "" {
klog.Infof("Migrating csp %s to cspi %s", cspObj.Name, cspiObj.Name)
err = c.scaleDownDeployment(cspObj, c.OpenebsNamespace)
err = c.scaleDownDeployment(cspObj.Name, cspiObj.Name, c.OpenebsNamespace)
if err != nil {
return err
}
@@ -477,24 +472,41 @@ func getCSP(cspLabel string) (*apis.CStorPool, error) {

// The old pool pod should be scaled down before the new cspi pod reconcile is
// enabled to avoid importing the pool at two places at the same time.
func (c *CSPCMigrator) scaleDownDeployment(cspObj *apis.CStorPool, openebsNamespace string) error {
klog.Infof("Scaling down deployment %s", cspObj.Name)
func (c *CSPCMigrator) scaleDownDeployment(cspName, cspiName, openebsNamespace string) error {
var zero int32 = 0
klog.Infof("Scaling down csp deployment %s", cspName)
cspDeployList, err := c.KubeClientset.AppsV1().
Deployments(openebsNamespace).List(
metav1.ListOptions{
LabelSelector: "openebs.io/cstor-pool=" + cspObj.Name,
LabelSelector: "openebs.io/cstor-pool=" + cspName,
})
if err != nil {
return err
}
if len(cspDeployList.Items) != 1 {
return errors.Errorf("invalid number of csp deployment found for %s: expected 1, got %d", cspObj.Name, len(cspDeployList.Items))
return errors.Errorf("invalid number of csp deployment found for %s: expected 1, got %d", cspName, len(cspDeployList.Items))
}
newCSPDeploy := cspDeployList.Items[0]
cspiDeploy, err := c.KubeClientset.AppsV1().
Deployments(openebsNamespace).Get(cspiName, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "failed to get deployment for cspi %s", cspiName)
}
// While scaling down the csp deployment, update its
// volumes as well so that the zrepl.lock file used by
// the csp and cspi pods becomes the same, avoiding data
// corruption due to multiple imports at the same time.
newCSPDeploy.Spec.Replicas = &zero
newCSPDeploy.Spec.Template.Spec.Volumes = cspiDeploy.Spec.Template.Spec.Volumes
patchData, err := GetPatchData(cspDeployList.Items[0], newCSPDeploy)
if err != nil {
return errors.Wrapf(err, "failed to patch data for csp %s", cspName)

Review comment (nit): Suggested change
    return errors.Wrapf(err, "failed to patch data for csp %s", cspName)
  → return errors.Wrapf(err, "failed to get patch data for csp deployment %s", cspName)
Author reply (Contributor): The error returned by the GetPatchData function is wrapped like "failed to generate patch data".
}
_, err = c.KubeClientset.AppsV1().Deployments(openebsNamespace).
Patch(
cspDeployList.Items[0].Name,
k8stypes.StrategicMergePatchType,
[]byte(replicaPatch),
patchData,
)
if err != nil {
return err
@@ -503,15 +515,15 @@ func (c *CSPCMigrator) scaleDownDeployment(cspObj *apis.CStorPool, openebsNamesp
cspPods, err1 := c.KubeClientset.CoreV1().
Pods(openebsNamespace).
List(metav1.ListOptions{
LabelSelector: "openebs.io/cstor-pool=" + cspObj.Name,
LabelSelector: "openebs.io/cstor-pool=" + cspName,
})
if err1 != nil {
klog.Errorf("failed to list pods for csp %s deployment: %s", cspObj.Name, err1.Error())
klog.Errorf("failed to list pods for csp %s deployment: %s", cspName, err1.Error())
} else {
if len(cspPods.Items) == 0 {
break
}
klog.Infof("waiting for csp %s deployment to scale down", cspObj.Name)
klog.Infof("waiting for csp %s deployment to scale down", cspName)
}
time.Sleep(10 * time.Second)
}
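
For context, the patchData applied above comes from the repository's GetPatchData helper, which, per the review exchange, wraps its error as "failed to generate patch data". Below is a minimal sketch of such a helper, assuming the standard k8s.io/apimachinery strategic-merge-patch utilities; the package name and exact error messages are placeholders and the actual implementation may differ.

```go
// Illustrative sketch only: compute a strategic merge patch between the old
// and new objects. Package name and error wording are assumptions.
package migrate

import (
	"encoding/json"

	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// GetPatchData marshals both objects and returns the strategic-merge-patch
// bytes that transform oldObj into newObj when applied to the live object.
func GetPatchData(oldObj, newObj interface{}) ([]byte, error) {
	oldData, err := json.Marshal(oldObj)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to marshal old data")
	}
	newData, err := json.Marshal(newObj)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to marshal new data")
	}
	// oldObj also serves as the type hint for the strategic merge patch schema.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldObj)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to generate patch data")
	}
	return patchBytes, nil
}
```

Applying the resulting bytes with k8stypes.StrategicMergePatchType, as scaleDownDeployment does above, scales the old csp deployment to zero replicas and switches its volumes to the cspi deployment's volumes in a single patch.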