From b4ba10297b9fa43a15111d21e488d22483170a5b Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Wed, 4 Nov 2020 17:13:48 +0100
Subject: [PATCH 1/6] check resize mode on update events

---
 .../templates/clusterrole.yaml               |  6 ++++++
 manifests/operator-service-account-rbac.yaml |  2 ++
 pkg/cluster/cluster.go                       | 18 ++++++++++++++----
 pkg/cluster/sync.go                          |  4 ++--
 4 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml
index 84da313d9..00ee776f5 100644
--- a/charts/postgres-operator/templates/clusterrole.yaml
+++ b/charts/postgres-operator/templates/clusterrole.yaml
@@ -105,6 +105,10 @@ rules:
   - delete
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
+  - patch
+  - update
+{{- end }}
 # to read existing PVs. Creation should be done via dynamic provisioning
 - apiGroups:
   - ""
@@ -113,7 +117,9 @@ rules:
   verbs:
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "ebs" }}
   - update # only for resizing AWS volumes
+{{- end }}
 # to watch Spilo pods and do rolling updates. Creation via StatefulSet
 - apiGroups:
   - ""
diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml
index 15ed7f53b..1ba5b4d23 100644
--- a/manifests/operator-service-account-rbac.yaml
+++ b/manifests/operator-service-account-rbac.yaml
@@ -106,6 +106,8 @@ rules:
   - delete
   - get
   - list
+  - patch
+  - update
 # to read existing PVs. Creation should be done via dynamic provisioning
 - apiGroups:
   - ""
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 18778fd41..595919d19 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -671,13 +671,23 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
 	// Volume
 	if oldSpec.Spec.Size != newSpec.Spec.Size {
-		c.logger.Debugf("syncing persistent volumes")
 		c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
 
-		if err := c.syncVolumes(); err != nil {
-			c.logger.Errorf("could not sync persistent volumes: %v", err)
-			updateFailed = true
+		if c.OpConfig.StorageResizeMode == "pvc" {
+			c.logger.Debugf("syncing persistent volume claims using %q resize mode", c.OpConfig.StorageResizeMode)
+			if err := c.syncVolumeClaims(); err != nil {
+				c.logger.Errorf("could not sync persistent volume claims: %v", err)
+				updateFailed = true
+			}
+		} else if c.OpConfig.StorageResizeMode == "ebs" {
+			c.logger.Debugf("syncing persistent volumes using %q resize mode", c.OpConfig.StorageResizeMode)
+			if err := c.syncVolumes(); err != nil {
+				c.logger.Errorf("could not sync persistent volumes: %v", err)
+				updateFailed = true
+			}
 		}
+	} else {
+		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
 	}
 
 	// Statefulset
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index ee7672a5b..9c9ac0023 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -58,7 +58,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 	}
 
 	if c.OpConfig.StorageResizeMode == "pvc" {
-		c.logger.Debugf("syncing persistent volume claims")
+		c.logger.Debugf("syncing persistent volume claims using %q resize mode", c.OpConfig.StorageResizeMode)
 		if err = c.syncVolumeClaims(); err != nil {
 			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
 			return err
@@ -70,7 +70,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		// TODO: handle the case of the cluster that is downsized and enlarged again
 		// (there will be a volume from the old pod for which we can't act before the
 		// the statefulset modification is concluded)
-		c.logger.Debugf("syncing persistent volumes")
+		c.logger.Debugf("syncing persistent volumes using %q resize mode", c.OpConfig.StorageResizeMode)
 		if err = c.syncVolumes(); err != nil {
 			err = fmt.Errorf("could not sync persistent volumes: %v", err)
 			return err

From eacb11a274b0b906f5daf480dd1997343960dec4 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 6 Nov 2020 12:19:42 +0100
Subject: [PATCH 2/6] add unit test for PVC resizing

---
 pkg/cluster/cluster.go      |   2 +-
 pkg/cluster/volumes.go      |   2 +-
 pkg/cluster/volumes_test.go | 111 ++++++++++++++++++++++++++++++++++++
 3 files changed, 113 insertions(+), 2 deletions(-)
 create mode 100644 pkg/cluster/volumes_test.go

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 595919d19..20ee68dc3 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -140,7 +140,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
 			Secrets:   make(map[types.UID]*v1.Secret),
 			Services:  make(map[PostgresRole]*v1.Service),
 			Endpoints: make(map[PostgresRole]*v1.Endpoints)},
-		userSyncStrategy: users.DefaultUserSyncStrategy{password_encryption},
+		userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: password_encryption},
 		deleteOptions:    metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
 		podEventsQueue:   podEventsQueue,
 		KubeClient:       kubeClient,
diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go
index d5c08c2e2..44b85663f 100644
--- a/pkg/cluster/volumes.go
+++ b/pkg/cluster/volumes.go
@@ -62,7 +62,7 @@ func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error {
 	if err != nil {
 		return fmt.Errorf("could not parse volume size: %v", err)
 	}
-	_, newSize, err := c.listVolumesWithManifestSize(newVolume)
+	newSize := quantityToGigabyte(newQuantity)
 	for _, pvc := range pvcs {
 		volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
 		if volumeSize >= newSize {
diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go
new file mode 100644
index 000000000..1a1073603
--- /dev/null
+++ b/pkg/cluster/volumes_test.go
@@ -0,0 +1,111 @@
+package cluster
+
+import (
+	"testing"
+
+	"context"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/stretchr/testify/assert"
+	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	"github.com/zalando/postgres-operator/pkg/util/config"
+	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	clientSet := fake.NewSimpleClientset()
+
+	return k8sutil.KubernetesClient{
+		PersistentVolumeClaimsGetter: clientSet.CoreV1(),
+	}, clientSet
+}
+
+func TestResizeVolumeClaim(t *testing.T) {
+	testName := "test resizing of persistent volume claims"
+	client, _ := NewFakeKubernetesClient()
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	newVolumeSize := "2Gi"
+
+	// new cluster with pvc resize mode and configured labels
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				Resources: config.Resources{
+					ClusterLabels:    map[string]string{"application": "spilo"},
+					ClusterNameLabel: "cluster-name",
+				},
+				StorageResizeMode: "pvc",
+			},
+		}, client, acidv1.Postgresql{}, logger, eventRecorder)
+
+	// set metadata, so that labels will get correct values
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+	filterLabels := cluster.labelsSet(false)
+
+	// define and create PVCs for 1Gi volumes
+	parsedStorage, err := resource.ParseQuantity("1Gi")
+	assert.NoError(t, err)
+
+	pvcList := &v1.PersistentVolumeClaimList{
+		Items: []v1.PersistentVolumeClaim{
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "pgdata-" + clusterName + "-0",
+					Namespace: namespace,
+					Labels:    filterLabels,
+				},
+				Spec: v1.PersistentVolumeClaimSpec{
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceStorage: parsedStorage,
+						},
+					},
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "pgdata-" + clusterName + "-1",
+					Namespace: namespace,
+					Labels:    filterLabels,
+				},
+				Spec: v1.PersistentVolumeClaimSpec{
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceStorage: parsedStorage,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, pvc := range pvcList.Items {
+		cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
+	}
+
+	// test resizing
+	cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize})
+
+	pvcs, err := cluster.listPersistentVolumeClaims()
+	assert.NoError(t, err)
+
+	if len(pvcs) != len(pvcList.Items) {
+		t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items))
+	}
+
+	for _, pvc := range pvcs {
+		newStorageSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
+		expectedQuantity, err := resource.ParseQuantity(newVolumeSize)
+		assert.NoError(t, err)
+		expectedSize := quantityToGigabyte(expectedQuantity)
+		if newStorageSize != expectedSize {
+			t.Errorf("%s: resizing failed, got %v, expected %v", testName, newStorageSize, expectedSize)
+		}
+	}
+}

From 58984cafabe6d0da510736ee5d2b1af950a77193 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 6 Nov 2020 14:29:02 +0100
Subject: [PATCH 3/6] set resize mode to pvc in charts and manifests

---
 charts/postgres-operator/values-crd.yaml                  | 2 +-
 charts/postgres-operator/values.yaml                      | 2 +-
 manifests/configmap.yaml                                  | 2 +-
 manifests/postgresql-operator-default-configuration.yaml  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml
index 6196c6fb2..71c2d5bb1 100644
--- a/charts/postgres-operator/values-crd.yaml
+++ b/charts/postgres-operator/values-crd.yaml
@@ -136,7 +136,7 @@ configKubernetes:
   # whether the Spilo container should run in privileged mode
   spilo_privileged: false
   # storage resize strategy, available options are: ebs, pvc, off
-  storage_resize_mode: ebs
+  storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*" # listen to all namespaces
 
diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml
index 231b0b9ac..95865503d 100644
--- a/charts/postgres-operator/values.yaml
+++ b/charts/postgres-operator/values.yaml
@@ -130,7 +130,7 @@ configKubernetes:
   # whether the Spilo container should run in privileged mode
   spilo_privileged: "false"
   # storage resize strategy, available options are: ebs, pvc, off
-  storage_resize_mode: ebs
+  storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*" # listen to all namespaces
 
diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml
index e59bfcea0..59283fd6e 100644
--- a/manifests/configmap.yaml
+++ b/manifests/configmap.yaml
@@ -107,7 +107,7 @@ data:
   # spilo_runasgroup: 103
   # spilo_fsgroup: 103
   spilo_privileged: "false"
-  # storage_resize_mode: "off"
+  storage_resize_mode: "pvc"
   super_username: postgres
   # team_admin_role: "admin"
   # team_api_role_configuration: "log_statement:all"
diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml
index 14acc4356..84537e06a 100644
--- a/manifests/postgresql-operator-default-configuration.yaml
+++ b/manifests/postgresql-operator-default-configuration.yaml
@@ -72,7 +72,7 @@ configuration:
     # spilo_runasgroup: 103
     # spilo_fsgroup: 103
     spilo_privileged: false
-    storage_resize_mode: ebs
+    storage_resize_mode: pvc
     # toleration: {}
     # watched_namespace: ""
   postgres_pod_resources:

From 84b306365fbc9fa242c456c21c136f82d53bbbc5 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 6 Nov 2020 16:27:56 +0100
Subject: [PATCH 4/6] add test for quantityToGigabyte

---
 pkg/cluster/volumes_test.go | 38 +++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go
index 1a1073603..139195835 100644
--- a/pkg/cluster/volumes_test.go
+++ b/pkg/cluster/volumes_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/util/config"
+	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"k8s.io/client-go/kubernetes/fake"
 )
@@ -56,7 +57,7 @@ func TestResizeVolumeClaim(t *testing.T) {
 		Items: []v1.PersistentVolumeClaim{
 			{
 				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pgdata-" + clusterName + "-0",
+					Name:      constants.DataVolumeName + "-" + clusterName + "-0",
 					Namespace: namespace,
 					Labels:    filterLabels,
 				},
@@ -70,7 +71,7 @@ func TestResizeVolumeClaim(t *testing.T) {
 			},
 			{
 				ObjectMeta: metav1.ObjectMeta{
-					Name:      "pgdata-" + clusterName + "-1",
+					Name:      constants.DataVolumeName + "-" + clusterName + "-1",
 					Namespace: namespace,
 					Labels:    filterLabels,
 				},
@@ -109,3 +110,36 @@ func TestResizeVolumeClaim(t *testing.T) {
 		}
 	}
 }
+
+func TestQuantityToGigabyte(t *testing.T) {
+	tests := []struct {
+		name        string
+		quantityStr string
+		expected    int64
+	}{
+		{
+			"test with 1Gi",
+			"1Gi",
+			1,
+		},
+		{
+			"test with float",
+			"1.5Gi",
+			int64(1),
+		},
+		{
+			"test with 1000Mi",
+			"1000Mi",
+			int64(0),
+		},
+	}
+
+	for _, tt := range tests {
+		quantity, err := resource.ParseQuantity(tt.quantityStr)
+		assert.NoError(t, err)
+		gigabyte := quantityToGigabyte(quantity)
+		if gigabyte != tt.expected {
+			t.Errorf("%s: got %v, expected %v", tt.name, gigabyte, tt.expected)
+		}
+	}
+}

From fe12184f1790f33926bf5c6097ac4a8153cb3882 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 6 Nov 2020 17:22:52 +0100
Subject: [PATCH 5/6] just one debug line for syncing volumes

---
 pkg/cluster/cluster.go | 4 +---
 pkg/cluster/sync.go    | 3 +--
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 20ee68dc3..433ed46b4 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -672,15 +672,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	// Volume
 	if oldSpec.Spec.Size != newSpec.Spec.Size {
 		c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
-
+		c.logger.Debugf("syncing volumes using %q resize mode", c.OpConfig.StorageResizeMode)
 		if c.OpConfig.StorageResizeMode == "pvc" {
-			c.logger.Debugf("syncing persistent volume claims using %q resize mode", c.OpConfig.StorageResizeMode)
 			if err := c.syncVolumeClaims(); err != nil {
 				c.logger.Errorf("could not sync persistent volume claims: %v", err)
 				updateFailed = true
 			}
 		} else if c.OpConfig.StorageResizeMode == "ebs" {
-			c.logger.Debugf("syncing persistent volumes using %q resize mode", c.OpConfig.StorageResizeMode)
 			if err := c.syncVolumes(); err != nil {
 				c.logger.Errorf("could not sync persistent volumes: %v", err)
 				updateFailed = true
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index 9c9ac0023..1be52e907 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -57,8 +57,8 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return err
 	}
 
+	c.logger.Debugf("syncing volumes using %q resize mode", c.OpConfig.StorageResizeMode)
 	if c.OpConfig.StorageResizeMode == "pvc" {
-		c.logger.Debugf("syncing persistent volume claims using %q resize mode", c.OpConfig.StorageResizeMode)
 		if err = c.syncVolumeClaims(); err != nil {
 			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
 			return err
@@ -70,7 +70,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		// TODO: handle the case of the cluster that is downsized and enlarged again
 		// (there will be a volume from the old pod for which we can't act before the
 		// the statefulset modification is concluded)
-		c.logger.Debugf("syncing persistent volumes using %q resize mode", c.OpConfig.StorageResizeMode)
 		if err = c.syncVolumes(); err != nil {
 			err = fmt.Errorf("could not sync persistent volumes: %v", err)
 			return err

From 8d67c78e4363a48705095691de1fb9e96fa0f90a Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Wed, 11 Nov 2020 11:26:01 +0100
Subject: [PATCH 6/6] extend test and update log msg

---
 pkg/cluster/cluster.go      |  2 +-
 pkg/cluster/sync.go         |  2 +-
 pkg/cluster/volumes_test.go | 38 +++++++++++++++++++++++++++++++------
 3 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 0602f0a15..7ec7be176 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -672,7 +672,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	// Volume
 	if oldSpec.Spec.Size != newSpec.Spec.Size {
 		c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
-		c.logger.Debugf("syncing volumes using %q resize mode", c.OpConfig.StorageResizeMode)
+		c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
 		if c.OpConfig.StorageResizeMode == "pvc" {
 			if err := c.syncVolumeClaims(); err != nil {
 				c.logger.Errorf("could not sync persistent volume claims: %v", err)
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index 1be52e907..61be7919d 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -57,7 +57,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return err
 	}
 
-	c.logger.Debugf("syncing volumes using %q resize mode", c.OpConfig.StorageResizeMode)
+	c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
 	if c.OpConfig.StorageResizeMode == "pvc" {
 		if err = c.syncVolumeClaims(); err != nil {
 			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go
index 139195835..49fbbd228 100644
--- a/pkg/cluster/volumes_test.go
+++ b/pkg/cluster/volumes_test.go
@@ -8,6 +8,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 
 	"github.com/stretchr/testify/assert"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@@ -32,7 +33,7 @@ func TestResizeVolumeClaim(t *testing.T) {
 	namespace := "default"
 	newVolumeSize := "2Gi"
 
-	// new cluster with pvc resize mode and configured labels
+	// new cluster with pvc storage resize mode and configured labels
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{
@@ -50,7 +51,7 @@ func TestResizeVolumeClaim(t *testing.T) {
 	filterLabels := cluster.labelsSet(false)
 
 	// define and create PVCs for 1Gi volumes
-	parsedStorage, err := resource.ParseQuantity("1Gi")
+	storage1Gi, err := resource.ParseQuantity("1Gi")
 	assert.NoError(t, err)
 
 	pvcList := &v1.PersistentVolumeClaimList{
@@ -64,7 +65,7 @@ func TestResizeVolumeClaim(t *testing.T) {
 				Spec: v1.PersistentVolumeClaimSpec{
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
-							v1.ResourceStorage: parsedStorage,
+							v1.ResourceStorage: storage1Gi,
 						},
 					},
 				},
@@ -78,7 +79,21 @@ func TestResizeVolumeClaim(t *testing.T) {
 				Spec: v1.PersistentVolumeClaimSpec{
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
-							v1.ResourceStorage: parsedStorage,
+							v1.ResourceStorage: storage1Gi,
+						},
+					},
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      constants.DataVolumeName + "-" + clusterName + "-2-0",
+					Namespace: namespace,
+					Labels:    labels.Set{},
+				},
+				Spec: v1.PersistentVolumeClaimSpec{
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceStorage: storage1Gi,
 						},
 					},
 				},
@@ -96,10 +111,12 @@ func TestResizeVolumeClaim(t *testing.T) {
 	pvcs, err := cluster.listPersistentVolumeClaims()
 	assert.NoError(t, err)
 
-	if len(pvcs) != len(pvcList.Items) {
-		t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items))
+	// check if listPersistentVolumeClaims returns only the PVCs matching the filter
+	if len(pvcs) != len(pvcList.Items)-1 {
+		t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
 	}
 
+	// check if PVCs were correctly resized
 	for _, pvc := range pvcs {
 		newStorageSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
 		expectedQuantity, err := resource.ParseQuantity(newVolumeSize)
@@ -109,6 +126,15 @@ func TestResizeVolumeClaim(t *testing.T) {
 			t.Errorf("%s: resizing failed, got %v, expected %v", testName, newStorageSize, expectedSize)
 		}
 	}
+
+	// check if other PVC was not resized
+	pvc2, err := cluster.KubeClient.PersistentVolumeClaims(namespace).Get(context.TODO(), constants.DataVolumeName+"-"+clusterName+"-2-0", metav1.GetOptions{})
+	assert.NoError(t, err)
+	unchangedSize := quantityToGigabyte(pvc2.Spec.Resources.Requests[v1.ResourceStorage])
+	expectedSize := quantityToGigabyte(storage1Gi)
+	if unchangedSize != expectedSize {
+		t.Errorf("%s: volume size changed, got %v, expected %v", testName, unchangedSize, expectedSize)
+	}
 }
 
 func TestQuantityToGigabyte(t *testing.T) {