From c663b878c78dd4bc05b476a3c3711f13d464a897 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 5 Oct 2020 12:48:54 +0200 Subject: [PATCH 01/40] First commit --- pkg/cluster/cluster.go | 27 ++++++------- pkg/cluster/database.go | 4 +- pkg/cluster/resources.go | 86 ++++++++++++++++++---------------------- pkg/cluster/sync.go | 36 ++++++++--------- pkg/cluster/types.go | 2 +- 5 files changed, 73 insertions(+), 82 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index adf6e30e8..940af7e5b 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -55,8 +55,8 @@ type Config struct { // K8S objects that are belongs to a connection pooler type ConnectionPoolerObjects struct { - Deployment map[PostgresRole]*appsv1.Deployment - Service map[PostgresRole]*v1.Service + Deployment *appsv1.Deployment + Service *v1.Service // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. 
@@ -72,7 +72,6 @@ type kubeResources struct { Endpoints map[PostgresRole]*v1.Endpoints Secrets map[types.UID]*v1.Secret Statefulset *appsv1.StatefulSet - ConnectionPooler *ConnectionPoolerObjects PodDisruptionBudget *policybeta1.PodDisruptionBudget //Pods are treated separately //PVCs are treated separately @@ -102,7 +101,7 @@ type Cluster struct { currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex - + ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects } type compareStatefulsetResult struct { @@ -346,19 +345,19 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - roles := c.RolesConnectionPooler() - for _, r := range roles { - if c.ConnectionPooler != nil { + for _, r := range c.RolesConnectionPooler() { + if c.ConnectionPooler[r] != nil { c.logger.Warning("Connection pooler already exists in the cluster") return nil + + connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) + if err != nil { + c.logger.Warningf("could not create connection pooler: %v", err) + return nil + } + c.logger.Infof("connection pooler %q has been successfully created for the role %v", + util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r) } - connectionPooler, err := c.createConnectionPooler(c.installLookupFunction) - if err != nil { - c.logger.Warningf("could not create connection pooler: %v", err) - return nil - } - c.logger.Infof("connection pooler %q has been successfully created", - util.NameFromMeta(connectionPooler.Deployment[r].ObjectMeta)) } return nil diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index 1a38bd41d..d12360c27 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -463,7 +463,7 @@ func (c *Cluster) execCreateOrAlterExtension(extName, 
schemaName, statement, doi // Creates a connection pool credentials lookup function in every database to // perform remote authentication. -func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { +func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error { var stmtBytes bytes.Buffer c.logger.Info("Installing lookup function") @@ -542,6 +542,6 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { } } - c.ConnectionPooler.LookupFunction = true + c.ConnectionPooler[role].LookupFunction = true return nil } diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 717a7f45f..419013339 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -101,16 +101,10 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { // // After that create all the objects for connection pooler, namely a deployment // with a chosen pooler and a service to expose it. -func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoolerObjects, error) { +func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { var msg string c.setProcessName("creating connection pooler") - if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} - c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) - c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) - } - schema := c.Spec.ConnectionPooler.Schema if schema == "" { @@ -122,49 +116,47 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo user = c.OpConfig.ConnectionPooler.User } - err := lookup(schema, user) + err := lookup(schema, user, role) if err != nil { msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } - if c.needConnectionPooler() { - roles := c.RolesConnectionPooler() - for _, r := range roles { - 
deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, r) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + if c.ConnectionPooler[role] == nil { + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) + } + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } - if err != nil { - return nil, err - } + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - serviceSpec := c.generateConnectionPoolerService(&c.Spec, r) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). - Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + if err != nil { + return nil, err + } - if err != nil { - return nil, err - } - c.ConnectionPooler.Deployment[r] = deployment - c.ConnectionPooler.Service[r] = service + serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). 
+ Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - } + if err != nil { + return nil, err } + c.ConnectionPooler[role].Deployment = deployment + c.ConnectionPooler[role].Service = service + + c.logger.Debugf("created new connection pooler %q, uid: %q", + util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - return c.ConnectionPooler, nil + return c.ConnectionPooler[role], nil } func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { @@ -181,7 +173,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Clean up the deployment object. If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate var deployment *appsv1.Deployment - deployment = c.ConnectionPooler.Deployment[role] + deployment = c.ConnectionPooler[role].Deployment policy := metav1.DeletePropagationForeground options := metav1.DeleteOptions{PropagationPolicy: &policy} @@ -206,7 +198,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Repeat the same for the service object var service *v1.Service - service = c.ConnectionPooler.Service[role] + service = c.ConnectionPooler[role].Service if service != nil { @@ -861,7 +853,7 @@ func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { // the check were already done before. 
func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment[role] == nil { + if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } @@ -874,9 +866,9 @@ func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeploym // worker at one time will try to update it chances of conflicts are // minimal. deployment, err := c.KubeClient. - Deployments(c.ConnectionPooler.Deployment[role].Namespace).Patch( + Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler.Deployment[role].Name, + c.ConnectionPooler[role].Deployment.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, @@ -885,7 +877,7 @@ func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeploym return nil, fmt.Errorf("could not patch deployment: %v", err) } - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment return deployment, nil } @@ -897,9 +889,9 @@ func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]strin if err != nil { return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) } - result, err := c.KubeClient.Deployments(c.ConnectionPooler.Deployment[role].Namespace).Patch( + result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler.Deployment[role].Name, + c.ConnectionPooler[role].Deployment.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}, diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index c4a788970..da868fd30 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -857,9 +857,9 @@ func (c *Cluster) 
syncConnectionPooler(oldSpec, oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) } if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} - c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) - c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) + //c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) + //c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) } if newNeedConnectionPooler { @@ -872,7 +872,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, // in this case also do not forget to install lookup function as for // creating cluster - if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction { + if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { newConnectionPooler := newSpec.Spec.ConnectionPooler specSchema := "" @@ -891,7 +891,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, specUser, c.OpConfig.ConnectionPooler.User) - if err = lookup(schema, user); err != nil { + if err = lookup(schema, user, role); err != nil { return NoSync, err } } @@ -913,14 +913,14 @@ func (c *Cluster) syncConnectionPooler(oldSpec, } } if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment[role] != nil || - c.ConnectionPooler.Service[role] != nil) { + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } } - if c.ConnectionPooler != nil && c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { + if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { c.ConnectionPooler = nil } } @@ -936,13 +936,13 @@ func (c *Cluster) syncConnectionPooler(oldSpec, } } if c.ConnectionPooler != nil 
&& - (c.ConnectionPooler.Deployment[role] != nil || - c.ConnectionPooler.Service[role] != nil) { + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } - } else if c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { + } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { c.ConnectionPooler = nil } } @@ -979,12 +979,12 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return NoSync, err } - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler @@ -1018,7 +1018,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return reason, fmt.Errorf(msg, err) } - oldDeploymentSpec := c.ConnectionPooler.Deployment[role] + oldDeploymentSpec := c.ConnectionPooler[role].Deployment deployment, err := c.updateConnectionPoolerDeployment( oldDeploymentSpec, @@ -1028,13 +1028,13 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return reason, err } - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment return reason, nil } } - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler.Deployment[role].Annotations) + newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) if newAnnotations != nil { c.updateConnectionPoolerAnnotations(newAnnotations, role) } @@ 
-1055,14 +1055,14 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return NoSync, err } - c.ConnectionPooler.Service[role] = service + c.ConnectionPooler[role].Service = service } else if err != nil { msg := "could not get connection pooler service to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler.Service[role] = service + c.ConnectionPooler[role].Service = service } return NoSync, nil diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go index 199914ccc..8aa519817 100644 --- a/pkg/cluster/types.go +++ b/pkg/cluster/types.go @@ -72,7 +72,7 @@ type ClusterStatus struct { type TemplateParams map[string]interface{} -type InstallFunction func(schema string, user string) error +type InstallFunction func(schema string, user string, role PostgresRole) error type SyncReason []string From d6b4f8260fd8b4da76f2a67334881b96fe73e3cd Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 6 Oct 2020 16:39:12 +0200 Subject: [PATCH 02/40] Make connection_pooler a separate package --- pkg/cluster/k8sres.go | 102 ---- pkg/cluster/resources.go | 193 ------ pkg/cluster/sync.go | 229 ------- pkg/connection_pooler/connection_pooler.go | 666 +++++++++++++++++++++ 4 files changed, 666 insertions(+), 524 deletions(-) create mode 100644 pkg/connection_pooler/connection_pooler.go diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index ecbe03212..c5bef5102 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2266,108 +2266,6 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference { } } -func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( - *appsv1.Deployment, error) { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. 
In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. - if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - if *numberOfInstances < constants.ConnectionPoolerMinInstances { - msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) - - *numberOfInstances = constants.ConnectionPoolerMinInstances - } - - if err != nil { - return nil, err - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this deployment, but there is a hope that this object - // will be garbage collected if something went wrong and operator - // didn't deleted it. 
- OwnerReferences: c.ownerReferences(), - }, - Spec: appsv1.DeploymentSpec{ - Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(role), - Template: *podTemplate, - }, - } - - return deployment, nil -} - -func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. - if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - serviceSpec := v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName(role), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(role), - }, - } - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this service, but there is a hope that this object will - // be garbage collected if something went wrong and operator didn't - // deleted it. 
- OwnerReferences: c.ownerReferences(), - }, - Spec: serviceSpec, - } - - return service -} - func ensurePath(file string, defaultDir string, defaultFile string) string { if file == "" { return path.Join(defaultDir, defaultFile) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 419013339..fc06bad08 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -94,145 +94,6 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { return statefulSet, nil } -// Prepare the database for connection pooler to be used, i.e. install lookup -// function (do it first, because it should be fast and if it didn't succeed, -// it doesn't makes sense to create more K8S objects. At this moment we assume -// that necessary connection pooler user exists. -// -// After that create all the objects for connection pooler, namely a deployment -// with a chosen pooler and a service to expose it. -func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { - var msg string - c.setProcessName("creating connection pooler") - - schema := c.Spec.ConnectionPooler.Schema - - if schema == "" { - schema = c.OpConfig.ConnectionPooler.Schema - } - - user := c.Spec.ConnectionPooler.User - if user == "" { - user = c.OpConfig.ConnectionPooler.User - } - - err := lookup(schema, user, role) - - if err != nil { - msg = "could not prepare database for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - if c.ConnectionPooler[role] == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - } - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. 
This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). - Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - c.ConnectionPooler[role].Deployment = deployment - c.ConnectionPooler[role].Service = service - - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - - return c.ConnectionPooler[role], nil -} - -func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { - c.setProcessName("deleting connection pooler") - c.logger.Debugln("deleting connection pooler") - - // Lack of connection pooler objects is not a fatal error, just log it if - // it was present before in the manifest - if c.ConnectionPooler == nil { - c.logger.Infof("No connection pooler to delete") - return nil - } - - // Clean up the deployment object. If deployment resource we've remembered - // is somehow empty, try to delete based on what would we generate - var deployment *appsv1.Deployment - deployment = c.ConnectionPooler[role].Deployment - - policy := metav1.DeletePropagationForeground - options := metav1.DeleteOptions{PropagationPolicy: &policy} - - if deployment != nil { - - // set delete propagation policy to foreground, so that replica set will be - // also deleted. - - err = c.KubeClient. - Deployments(c.Namespace). 
- Delete(context.TODO(), c.connectionPoolerName(role), options) - - if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler deployment was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete deployment: %v", err) - } - - c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) - } - - // Repeat the same for the service object - var service *v1.Service - service = c.ConnectionPooler[role].Service - - if service != nil { - - err = c.KubeClient. - Services(c.Namespace). - Delete(context.TODO(), c.connectionPoolerName(role), options) - - if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler service was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete service: %v", err) - } - - c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) - } - // Repeat the same for the secret object - secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) - - secret, err := c.KubeClient. - Secrets(c.Namespace). - Get(context.TODO(), secretName, metav1.GetOptions{}) - - if err != nil { - c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) - } else { - if err = c.deleteSecret(secret.UID, *secret); err != nil { - return fmt.Errorf("could not delete pooler secret: %v", err) - } - } - - c.ConnectionPooler = nil - return nil -} - func getPodIndex(podName string) (int32, error) { parts := strings.Split(podName, "-") if len(parts) == 0 { @@ -848,57 +709,3 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet { func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { return c.PodDisruptionBudget } - -// Perform actual patching of a connection pooler deployment, assuming that all -// the check were already done before. 
-func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { - c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { - return nil, fmt.Errorf("there is no connection pooler in the cluster") - } - - patchData, err := specPatch(newDeployment.Spec) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment: %v", err) - } - - // An update probably requires RetryOnConflict, but since only one operator - // worker at one time will try to update it chances of conflicts are - // minimal. - deployment, err := c.KubeClient. - Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( - context.TODO(), - c.ConnectionPooler[role].Deployment.Name, - types.MergePatchType, - patchData, - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch deployment: %v", err) - } - - c.ConnectionPooler[role].Deployment = deployment - - return deployment, nil -} - -//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { - c.logger.Debugf("updating connection pooler annotations") - patchData, err := metaAnnotationsPatch(annotations) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) - } - result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( - context.TODO(), - c.ConnectionPooler[role].Deployment.Name, - types.MergePatchType, - []byte(patchData), - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) - } - return result, nil - -} diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index da868fd30..49408fbf7 100644 --- 
a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -838,232 +838,3 @@ func (c *Cluster) syncLogicalBackupJob() error { return nil } - -func (c *Cluster) syncConnectionPooler(oldSpec, - newSpec *acidv1.Postgresql, - lookup InstallFunction) (SyncReason, error) { - - var reason SyncReason - var err error - var newNeedConnectionPooler, oldNeedConnectionPooler bool - - // Check and perform the sync requirements for each of the roles. - for _, role := range [2]PostgresRole{Master, Replica} { - if role == Master { - newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) - } else { - newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) - } - if c.ConnectionPooler == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - //c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) - //c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) - } - - if newNeedConnectionPooler { - // Try to sync in any case. If we didn't needed connection pooler before, - // it means we want to create it. 
If it was already present, still sync - // since it could happen that there is no difference in specs, and all - // the resources are remembered, but the deployment was manually deleted - // in between - c.logger.Debug("syncing connection pooler") - - // in this case also do not forget to install lookup function as for - // creating cluster - if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { - newConnectionPooler := newSpec.Spec.ConnectionPooler - - specSchema := "" - specUser := "" - - if newConnectionPooler != nil { - specSchema = newConnectionPooler.Schema - specUser = newConnectionPooler.User - } - - schema := util.Coalesce( - specSchema, - c.OpConfig.ConnectionPooler.Schema) - - user := util.Coalesce( - specUser, - c.OpConfig.ConnectionPooler.User) - - if err = lookup(schema, user, role); err != nil { - return NoSync, err - } - } - - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - return reason, err - } - } - - if oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } - if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil - } - } - - if !oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources if not empty - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if 
c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil - } - } - } - - return reason, nil -} - -// Synchronize connection pooler resources. Effectively we're interested only in -// synchronizing the corresponding deployment, but in case of deployment or -// service is missing, create it. After checking, also remember an object for -// the future references. -func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( - SyncReason, error) { - - deployment, err := c.KubeClient. - Deployments(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return NoSync, fmt.Errorf(msg, err) - } - - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). 
- Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - c.ConnectionPooler[role].Deployment = deployment - } else if err != nil { - msg := "could not get connection pooler deployment to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - c.ConnectionPooler[role].Deployment = deployment - - // actual synchronization - oldConnectionPooler := oldSpec.Spec.ConnectionPooler - newConnectionPooler := newSpec.Spec.ConnectionPooler - - // sync implementation below assumes that both old and new specs are - // not nil, but it can happen. To avoid any confusion like updating a - // deployment because the specification changed from nil to an empty - // struct (that was initialized somewhere before) replace any nil with - // an empty spec. - if oldConnectionPooler == nil { - oldConnectionPooler = &acidv1.ConnectionPooler{} - } - - if newConnectionPooler == nil { - newConnectionPooler = &acidv1.ConnectionPooler{} - } - - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - - specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) - reason := append(specReason, defaultsReason...) 
- - if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), reason) - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg := "could not generate deployment for connection pooler: %v" - return reason, fmt.Errorf(msg, err) - } - - oldDeploymentSpec := c.ConnectionPooler[role].Deployment - - deployment, err := c.updateConnectionPoolerDeployment( - oldDeploymentSpec, - newDeploymentSpec, - role) - - if err != nil { - return reason, err - } - c.ConnectionPooler[role].Deployment = deployment - - return reason, nil - } - } - - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) - if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations, role) - } - - service, err := c.KubeClient. - Services(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Service %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) - - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - c.ConnectionPooler[role].Service = service - - } else if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler[role].Service = service - } - - return NoSync, nil -} diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go new file mode 100644 index 000000000..8a218ce7f --- /dev/null +++ b/pkg/connection_pooler/connection_pooler.go @@ -0,0 +1,666 @@ +package connection_pooler + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/r3labs/diff" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/cluster" + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" +) + +// K8S objects that are belongs to a connection pooler +type ConnectionPoolerObjects struct { + Deployment *appsv1.Deployment + Service *v1.Service + Name string + // It could happen that a connection pooler was enabled, but the operator + // was not able to properly process a corresponding event or was restarted. + // In this case we will miss missing/require situation and a lookup function + // will not be installed. To avoid synchronizing it all the time to prevent + // this, we can remember the result in memory at least until the next + // restart. + LookupFunction bool +} + +// Prepare the database for connection pooler to be used, i.e. 
install lookup +// function (do it first, because it should be fast and if it didn't succeed, +// it doesn't makes sense to create more K8S objects. At this moment we assume +// that necessary connection pooler user exists. +// +// After that create all the objects for connection pooler, namely a deployment +// with a chosen pooler and a service to expose it. + +// have connectionpooler name in the cp object to have it immutable name +// add these cp related functions to a new cp file +// opConfig, cluster, and database name +func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.InstallFunction, role cluster.PostgresRole, c cluster.Cluster) (*ConnectionPoolerObjects, error) { + var msg string + c.setProcessName("creating connection pooler") + + schema := c.Spec.ConnectionPooler.Schema + + if schema == "" { + schema = c.OpConfig.ConnectionPooler.Schema + } + + user := c.Spec.ConnectionPooler.User + if user == "" { + user = c.OpConfig.ConnectionPooler.User + } + + err := c.lookup(schema, user, role) + + if err != nil { + msg = "could not prepare database for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + if c.ConnectionPooler[role] == nil { + c.ConnectionPooler = make(map[c.PostgresRole]*ConnectionPoolerObjects) + c.ConnectionPooler[role].Deployment = nil + c.ConnectionPooler[role].Service = nil + c.ConnectionPooler[role].LookupFunction = false + } + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). 
+ Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + + serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). + Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment + c.ConnectionPooler[role].Service = service + + c.logger.Debugf("created new connection pooler %q, uid: %q", + util.NameFromMeta(deployment.ObjectMeta), deployment.UID) + + return c.ConnectionPooler[role], nil +} + +func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) ( + *appsv1.Deployment, error) { + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. 
+ if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + if *numberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "Adjusted number of connection pooler instances from %d to %d" + c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + + *numberOfInstances = constants.ConnectionPoolerMinInstances + } + + if err != nil { + return nil, err + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName(role), + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this deployment, but there is a hope that this object + // will be garbage collected if something went wrong and operator + // didn't deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: numberOfInstances, + Selector: c.connectionPoolerLabelsSelector(role), + Template: *podTemplate, + }, + } + + return deployment, nil +} + +func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) *v1.Service { + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. 
It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. + if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + serviceSpec := v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName(role), + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, + }, + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(role), + }, + } + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName(role), + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this service, but there is a hope that this object will + // be garbage collected if something went wrong and operator didn't + // deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: serviceSpec, + } + + return service +} + +// delete connection pooler +func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role cluster.PostgresRole, c cluster.Cluster) (err error) { + c.setProcessName("deleting connection pooler") + c.logger.Debugln("deleting connection pooler") + + // Lack of connection pooler objects is not a fatal error, just log it if + // it was present before in the manifest + if c.ConnectionPooler == nil { + c.logger.Infof("No connection pooler to delete") + return nil + } + + // Clean up the deployment object. 
If deployment resource we've remembered + // is somehow empty, try to delete based on what would we generate + var deployment *appsv1.Deployment + deployment = c.ConnectionPooler[role].Deployment + + policy := metav1.DeletePropagationForeground + options := metav1.DeleteOptions{PropagationPolicy: &policy} + + if deployment != nil { + + // set delete propagation policy to foreground, so that replica set will be + // also deleted. + + err = c.KubeClient. + Deployments(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler deployment was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete deployment: %v", err) + } + + c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) + } + + // Repeat the same for the service object + var service *v1.Service + service = c.ConnectionPooler[role].Service + + if service != nil { + + err = c.KubeClient. + Services(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler service was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete service: %v", err) + } + + c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) + } + // Repeat the same for the secret object + secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) + + secret, err := c.KubeClient. + Secrets(c.Namespace). 
+ Get(context.TODO(), secretName, metav1.GetOptions{}) + + if err != nil { + c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) + } else { + if err = c.deleteSecret(secret.UID, *secret); err != nil { + return fmt.Errorf("could not delete pooler secret: %v", err) + } + } + + c.ConnectionPooler = nil + return nil +} + +// Perform actual patching of a connection pooler deployment, assuming that all +// the check were already done before. +func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { + c.setProcessName("updating connection pooler") + if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { + return nil, fmt.Errorf("there is no connection pooler in the cluster") + } + + patchData, err := specPatch(newDeployment.Spec) + if err != nil { + return nil, fmt.Errorf("could not form patch for the deployment: %v", err) + } + + // An update probably requires RetryOnConflict, but since only one operator + // worker at one time will try to update it chances of conflicts are + // minimal. + deployment, err := c.KubeClient. 
+ Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + context.TODO(), + c.ConnectionPooler[role].Deployment.Name, + types.MergePatchType, + patchData, + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch deployment: %v", err) + } + + c.ConnectionPooler[role].Deployment = deployment + + return deployment, nil +} + +//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment +func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { + c.logger.Debugf("updating connection pooler annotations") + patchData, err := metaAnnotationsPatch(annotations) + if err != nil { + return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) + } + result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + context.TODO(), + c.ConnectionPooler[role].Deployment.Name, + types.MergePatchType, + []byte(patchData), + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) + } + return result, nil + +} + +//sync connection pooler + +// Test if two connection pooler configuration needs to be synced. For simplicity +// compare not the actual K8S objects, but the configuration itself and request +// sync if there is any difference. 
+func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, c cluster.Cluster) (sync bool, reasons []string) { + reasons = []string{} + sync = false + + changelog, err := diff.Diff(oldSpec, newSpec) + if err != nil { + c.logger.Infof("Cannot get diff, do not do anything, %+v", err) + return false, reasons + } + + if len(changelog) > 0 { + sync = true + } + + for _, change := range changelog { + msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", + change.Type, change.Path, change.From, change.To) + reasons = append(reasons, msg) + } + + return sync, reasons +} + +// Check if we need to synchronize connection pooler deployment due to new +// defaults, that are different from what we see in the DeploymentSpec +func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment, c cluster.Cluster) (sync bool, reasons []string) { + + reasons = []string{} + sync = false + + config := c.OpConfig.ConnectionPooler + podTemplate := deployment.Spec.Template + poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] + + if spec == nil { + spec = &acidv1.ConnectionPooler{} + } + + if spec.NumberOfInstances == nil && + *deployment.Spec.Replicas != *config.NumberOfInstances { + + sync = true + msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", + *deployment.Spec.Replicas, *config.NumberOfInstances) + reasons = append(reasons, msg) + } + + if spec.DockerImage == "" && + poolerContainer.Image != config.Image { + + sync = true + msg := fmt.Sprintf("DockerImage is different (having %s, required %s)", + poolerContainer.Image, config.Image) + reasons = append(reasons, msg) + } + + expectedResources, err := generateResourceRequirements(spec.Resources, + c.makeDefaultConnectionPoolerResources()) + + // An error to generate expected resources means something is not quite + // right, but for the purpose of robustness do not panic 
here, just report + // and ignore resources comparison (in the worst case there will be no + // updates for new resource values). + if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + sync = true + msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", + poolerContainer.Resources, expectedResources) + reasons = append(reasons, msg) + } + + if err != nil { + c.logger.Warningf("Cannot generate expected resources, %v", err) + } + + for _, env := range poolerContainer.Env { + if spec.User == "" && env.Name == "PGUSER" { + ref := env.ValueFrom.SecretKeyRef.LocalObjectReference + + if ref.Name != c.credentialSecretName(config.User) { + sync = true + msg := fmt.Sprintf("pooler user is different (having %s, required %s)", + ref.Name, config.User) + reasons = append(reasons, msg) + } + } + + if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { + sync = true + msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", + env.Value, config.Schema) + reasons = append(reasons, msg) + } + } + + return sync, reasons +} + +func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup cluster.InstallFunction, c cluster.Cluster) (SyncReason, error) { + + var reason SyncReason + var err error + var newNeedConnectionPooler, oldNeedConnectionPooler bool + + // Check and perform the sync requirements for each of the roles. 
+ for _, role := range [2]PostgresRole{Master, Replica} { + if role == Master { + newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + } else { + newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + } + if c.ConnectionPooler == nil { + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) + c.ConnectionPooler[role].Deployment = nil + c.ConnectionPooler[role].Service = nil + c.ConnectionPooler[role].LookupFunction = false + } + + if newNeedConnectionPooler { + // Try to sync in any case. If we didn't needed connection pooler before, + // it means we want to create it. If it was already present, still sync + // since it could happen that there is no difference in specs, and all + // the resources are remembered, but the deployment was manually deleted + // in between + c.logger.Debug("syncing connection pooler for the role %v", role) + + // in this case also do not forget to install lookup function as for + // creating cluster + if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { + newConnectionPooler := newSpec.Spec.ConnectionPooler + + specSchema := "" + specUser := "" + + if newConnectionPooler != nil { + specSchema = newConnectionPooler.Schema + specUser = newConnectionPooler.User + } + + schema := util.Coalesce( + specSchema, + c.OpConfig.ConnectionPooler.Schema) + + user := util.Coalesce( + specUser, + c.OpConfig.ConnectionPooler.User) + + if err = lookup(schema, user, role); err != nil { + return NoSync, err + } + } + + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + return reason, err + } + } + + if oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources + otherRole := role + if 
len(c.RolesConnectionPooler()) == 2 { + if role == Master { + otherRole = Replica + } else { + otherRole = Master + } + } + if c.ConnectionPooler != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { + c.ConnectionPooler = nil + } + } + + if !oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources if not empty + otherRole := role + if len(c.RolesConnectionPooler()) == 2 { + if role == Master { + otherRole = Replica + } else { + otherRole = Master + } + } + if c.ConnectionPooler != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { + c.ConnectionPooler = nil + } + } + } + + return reason, nil +} + +// Synchronize connection pooler resources. Effectively we're interested only in +// synchronizing the corresponding deployment, but in case of deployment or +// service is missing, create it. After checking, also remember an object for +// the future references. +func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role cluster.PostgresRole, c cluster.Cluster) ( + SyncReason, error) { + + deployment, err := c.KubeClient. + Deployments(c.Namespace). 
+ Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return NoSync, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Deployment = deployment + } else if err != nil { + msg := "could not get connection pooler deployment to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + c.ConnectionPooler[role].Deployment = deployment + + // actual synchronization + oldConnectionPooler := oldSpec.Spec.ConnectionPooler + newConnectionPooler := newSpec.Spec.ConnectionPooler + + // sync implementation below assumes that both old and new specs are + // not nil, but it can happen. To avoid any confusion like updating a + // deployment because the specification changed from nil to an empty + // struct (that was initialized somewhere before) replace any nil with + // an empty spec. + if oldConnectionPooler == nil { + oldConnectionPooler = &acidv1.ConnectionPooler{} + } + + if newConnectionPooler == nil { + newConnectionPooler = &acidv1.ConnectionPooler{} + } + + c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + + specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + reason := append(specReason, defaultsReason...) 
+ + if specSync || defaultsSync { + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg := "could not generate deployment for connection pooler: %v" + return reason, fmt.Errorf(msg, err) + } + + oldDeploymentSpec := c.ConnectionPooler[role].Deployment + + deployment, err := c.updateConnectionPoolerDeployment( + oldDeploymentSpec, + newDeploymentSpec, + role) + + if err != nil { + return reason, err + } + c.ConnectionPooler[role].Deployment = deployment + + return reason, nil + } + } + + newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) + if newAnnotations != nil { + c.updateConnectionPoolerAnnotations(newAnnotations, role) + } + + service, err := c.KubeClient. + Services(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Service %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). 
+ Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Service = service + + } else if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler[role].Service = service + } + + return NoSync, nil +} From 49f628548e53b2a78831e377552fdff09fd7013a Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 6 Oct 2020 21:36:19 +0200 Subject: [PATCH 03/40] Add interface to use common functions --- pkg/cluster/cluster.go | 181 ++----- pkg/cluster/k8sres.go | 201 -------- pkg/cluster/sync.go | 6 +- pkg/cluster/util.go | 58 --- pkg/connection_pooler/connection_pooler.go | 557 ++++++++++++++++----- pkg/pooler_interface/pooler_interface.go | 18 + 6 files changed, 477 insertions(+), 544 deletions(-) create mode 100644 pkg/pooler_interface/pooler_interface.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 940af7e5b..39434b1b6 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -12,7 +12,6 @@ import ( "sync" "time" - "github.com/r3labs/diff" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -25,6 +24,8 @@ import ( "k8s.io/client-go/tools/reference" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + + "github.com/zalando/postgres-operator/pkg/connection_pooler" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -34,6 +35,7 @@ import ( "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" + rbacv1 "k8s.io/api/rbac/v1" ) @@ -53,20 +55,6 @@ type Config struct { PodServiceAccountRoleBinding *rbacv1.RoleBinding } -// K8S 
objects that are belongs to a connection pooler -type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - - // It could happen that a connection pooler was enabled, but the operator - // was not able to properly process a corresponding event or was restarted. - // In this case we will miss missing/require situation and a lookup function - // will not be installed. To avoid synchronizing it all the time to prevent - // this, we can remember the result in memory at least until the next - // restart. - LookupFunction bool -} - type kubeResources struct { Services map[PostgresRole]*v1.Service Endpoints map[PostgresRole]*v1.Endpoints @@ -101,9 +89,8 @@ type Cluster struct { currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex - ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects + ConnectionPooler map[PostgresRole]*connection_pooler.ConnectionPoolerObjects } - type compareStatefulsetResult struct { match bool replace bool @@ -345,12 +332,19 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - for _, r := range c.RolesConnectionPooler() { - if c.ConnectionPooler[r] != nil { - c.logger.Warning("Connection pooler already exists in the cluster") - return nil + if c.needConnectionPooler() { + + roles := c.RolesConnectionPooler() + for _, r := range roles { + c.logger.Warningf("found roles are %v", r) + } - connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) + for _, r := range c.RolesConnectionPooler() { + if c.ConnectionPooler[r] != nil { + c.logger.Warning("Connection pooler already exists in the cluster") + return nil + } + connectionPooler, err := c.ConnectionPooler[r].createConnectionPooler(c.installLookupFunction, r) if err != nil { 
c.logger.Warningf("could not create connection pooler: %v", err) return nil @@ -359,7 +353,6 @@ func (c *Cluster) Create() error { util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r) } } - return nil } @@ -781,15 +774,31 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } // sync connection pooler - if _, err := c.syncConnectionPooler(oldSpec, newSpec, - c.installLookupFunction); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - updateFailed = true + for _, role := range c.RolesConnectionPooler() { + if _, err := c.ConnectionPooler[role].syncConnectionPooler(oldSpec, newSpec, + c.installLookupFunction); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + updateFailed = true + } } return nil } +func syncResources(a, b *v1.ResourceRequirements) bool { + for _, res := range []v1.ResourceName{ + v1.ResourceCPU, + v1.ResourceMemory, + } { + if !a.Limits[res].Equal(b.Limits[res]) || + !a.Requests[res].Equal(b.Requests[res]) { + return true + } + } + + return false +} + // Delete deletes the cluster and cleans up all objects associated with it (including statefulsets). // The deletion order here is somewhat significant, because Patroni, when running with the Kubernetes // DCS, reuses the master's endpoint to store the leader related metadata. If we remove the endpoint @@ -839,7 +848,7 @@ func (c *Cluster) Delete() { // manifest, just to not keep orphaned components in case if something went // wrong for _, role := range [2]PostgresRole{Master, Replica} { - if err := c.deleteConnectionPooler(role); err != nil { + if err := c.ConnectionPooler[role].deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } } @@ -1387,119 +1396,3 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error { return c.deleteClusterObject(get, deleteConfigMapFn, "configmap") } - -// Test if two connection pooler configuration needs to be synced. 
For simplicity -// compare not the actual K8S objects, but the configuration itself and request -// sync if there is any difference. -func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { - reasons = []string{} - sync = false - - changelog, err := diff.Diff(oldSpec, newSpec) - if err != nil { - c.logger.Infof("Cannot get diff, do not do anything, %+v", err) - return false, reasons - } - - if len(changelog) > 0 { - sync = true - } - - for _, change := range changelog { - msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", - change.Type, change.Path, change.From, change.To) - reasons = append(reasons, msg) - } - - return sync, reasons -} - -func syncResources(a, b *v1.ResourceRequirements) bool { - for _, res := range []v1.ResourceName{ - v1.ResourceCPU, - v1.ResourceMemory, - } { - if !a.Limits[res].Equal(b.Limits[res]) || - !a.Requests[res].Equal(b.Requests[res]) { - return true - } - } - - return false -} - -// Check if we need to synchronize connection pooler deployment due to new -// defaults, that are different from what we see in the DeploymentSpec -func (c *Cluster) needSyncConnectionPoolerDefaults( - spec *acidv1.ConnectionPooler, - deployment *appsv1.Deployment) (sync bool, reasons []string) { - - reasons = []string{} - sync = false - - config := c.OpConfig.ConnectionPooler - podTemplate := deployment.Spec.Template - poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] - - if spec == nil { - spec = &acidv1.ConnectionPooler{} - } - - if spec.NumberOfInstances == nil && - *deployment.Spec.Replicas != *config.NumberOfInstances { - - sync = true - msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", - *deployment.Spec.Replicas, *config.NumberOfInstances) - reasons = append(reasons, msg) - } - - if spec.DockerImage == "" && - poolerContainer.Image != config.Image { - - sync = true - msg := fmt.Sprintf("DockerImage is different (having %s, 
required %s)", - poolerContainer.Image, config.Image) - reasons = append(reasons, msg) - } - - expectedResources, err := generateResourceRequirements(spec.Resources, - c.makeDefaultConnectionPoolerResources()) - - // An error to generate expected resources means something is not quite - // right, but for the purpose of robustness do not panic here, just report - // and ignore resources comparison (in the worst case there will be no - // updates for new resource values). - if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { - sync = true - msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", - poolerContainer.Resources, expectedResources) - reasons = append(reasons, msg) - } - - if err != nil { - c.logger.Warningf("Cannot generate expected resources, %v", err) - } - - for _, env := range poolerContainer.Env { - if spec.User == "" && env.Name == "PGUSER" { - ref := env.ValueFrom.SecretKeyRef.LocalObjectReference - - if ref.Name != c.credentialSecretName(config.User) { - sync = true - msg := fmt.Sprintf("pooler user is different (having %s, required %s)", - ref.Name, config.User) - reasons = append(reasons, msg) - } - } - - if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { - sync = true - msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", - env.Value, config.Schema) - reasons = append(reasons, msg) - } - } - - return sync, reasons -} diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index c5bef5102..9acf39294 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -75,14 +75,6 @@ func (c *Cluster) statefulSetName() string { return c.Name } -func (c *Cluster) connectionPoolerName(role PostgresRole) string { - name := c.Name + "-pooler" - if role == Replica { - name = name + "-repl" - } - return name -} - func (c *Cluster) endpointName(role PostgresRole) string { name := c.Name if role == Replica { @@ -146,26 +138,6 @@ func (c *Cluster) 
makeDefaultResources() acidv1.Resources { } } -// Generate default resource section for connection pooler deployment, to be -// used if nothing custom is specified in the manifest -func (c *Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := c.OpConfig - - defaultRequests := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, - } - defaultLimits := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, - } - - return acidv1.Resources{ - ResourceRequests: defaultRequests, - ResourceLimits: defaultLimits, - } -} - func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) { var err error @@ -2068,179 +2040,6 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) { return "logical-backup-" + c.clusterName().Name } -// Generate pool size related environment variables. -// -// MAX_DB_CONN would specify the global maximum for connections to a target -// database. -// -// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. -// -// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when -// most of the queries coming through a connection pooler are from the same -// user to the same db). In case if we want to spin up more connection pooler -// instances, take this into account and maintain the same number of -// connections. -// -// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload -// have to wait for spinning up a new connections. -// -// RESERVE_SIZE is how many additional connections to allow for a pooler. 
-func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { - effectiveMode := util.Coalesce( - spec.ConnectionPooler.Mode, - c.OpConfig.ConnectionPooler.Mode) - - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - effectiveMaxDBConn := util.CoalesceInt32( - spec.ConnectionPooler.MaxDBConnections, - c.OpConfig.ConnectionPooler.MaxDBConnections) - - if effectiveMaxDBConn == nil { - effectiveMaxDBConn = k8sutil.Int32ToPointer( - constants.ConnectionPoolerMaxDBConnections) - } - - maxDBConn := *effectiveMaxDBConn / *numberOfInstances - - defaultSize := maxDBConn / 2 - minSize := defaultSize / 2 - reserveSize := minSize - - return []v1.EnvVar{ - { - Name: "CONNECTION_POOLER_PORT", - Value: fmt.Sprint(pgPort), - }, - { - Name: "CONNECTION_POOLER_MODE", - Value: effectiveMode, - }, - { - Name: "CONNECTION_POOLER_DEFAULT_SIZE", - Value: fmt.Sprint(defaultSize), - }, - { - Name: "CONNECTION_POOLER_MIN_SIZE", - Value: fmt.Sprint(minSize), - }, - { - Name: "CONNECTION_POOLER_RESERVE_SIZE", - Value: fmt.Sprint(reserveSize), - }, - { - Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", - Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), - }, - { - Name: "CONNECTION_POOLER_MAX_DB_CONN", - Value: fmt.Sprint(maxDBConn), - }, - } -} - -func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( - *v1.PodTemplateSpec, error) { - - gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) - resources, err := generateResourceRequirements( - spec.ConnectionPooler.Resources, - c.makeDefaultConnectionPoolerResources()) - - effectiveDockerImage := util.Coalesce( - spec.ConnectionPooler.DockerImage, - c.OpConfig.ConnectionPooler.Image) - - effectiveSchema := util.Coalesce( - spec.ConnectionPooler.Schema, - 
c.OpConfig.ConnectionPooler.Schema) - - if err != nil { - return nil, fmt.Errorf("could not generate resource requirements: %v", err) - } - - secretSelector := func(key string) *v1.SecretKeySelector { - effectiveUser := util.Coalesce( - spec.ConnectionPooler.User, - c.OpConfig.ConnectionPooler.User) - - return &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: c.credentialSecretName(effectiveUser), - }, - Key: key, - } - } - - envVars := []v1.EnvVar{ - { - Name: "PGHOST", - Value: c.serviceAddress(role), - }, - { - Name: "PGPORT", - Value: c.servicePort(role), - }, - { - Name: "PGUSER", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("username"), - }, - }, - // the convention is to use the same schema name as - // connection pooler username - { - Name: "PGSCHEMA", - Value: effectiveSchema, - }, - { - Name: "PGPASSWORD", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("password"), - }, - }, - } - - envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...) - - poolerContainer := v1.Container{ - Name: connectionPoolerContainer, - Image: effectiveDockerImage, - ImagePullPolicy: v1.PullIfNotPresent, - Resources: *resources, - Ports: []v1.ContainerPort{ - { - ContainerPort: pgPort, - Protocol: v1.ProtocolTCP, - }, - }, - Env: envVars, - } - - podTemplate := &v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, - Namespace: c.Namespace, - Annotations: c.generatePodAnnotations(spec), - }, - Spec: v1.PodSpec{ - ServiceAccountName: c.OpConfig.PodServiceAccountName, - TerminationGracePeriodSeconds: &gracePeriod, - Containers: []v1.Container{poolerContainer}, - // TODO: add tolerations to scheduler pooler on the same node - // as database - //Tolerations: *tolerationsSpec, - }, - } - - return podTemplate, nil -} - // Return an array of ownerReferences to make an arbitraty object dependent on // the StatefulSet. 
Dependency is made on StatefulSet instead of PostgreSQL CRD // while the former is represent the actual state, and only it's deletion means diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 49408fbf7..0738f2b77 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -128,8 +128,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { } // sync connection pooler - if _, err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { - return fmt.Errorf("could not sync connection pooler: %v", err) + for _, role := range c.RolesConnectionPooler() { + if _, err = c.ConnectionPooler[role].syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { + return fmt.Errorf("could not sync connection pooler: %v", err) + } } return err diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index f00086e50..d5b9bfb67 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -415,30 +415,6 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector { } } -// Return connection pooler labels selector, which should from one point of view -// inherit most of the labels from the cluster itself, but at the same time -// have e.g. different `application` label, so that recreatePod operation will -// not interfere with it (it lists all the pods via labels, and if there would -// be no difference, it will recreate also pooler pods). 
-func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { - connectionPoolerLabels := labels.Set(map[string]string{}) - - extraLabels := labels.Set(map[string]string{ - "connection-pooler-name": c.connectionPoolerName(role), - "application": "db-connection-pooler", - "role": string(role), - "cluster-name": c.ClusterName, - }) - - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels) - - return &metav1.LabelSelector{ - MatchLabels: connectionPoolerLabels, - MatchExpressions: nil, - } -} - func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) labels.Set { lbls := c.labelsSet(shouldAddExtraLabels) lbls[c.OpConfig.PodRoleLabel] = string(role) @@ -521,40 +497,6 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { return c.OpConfig.KubernetesUseConfigMaps } -// isConnectionPoolerEnabled -func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) -} - -func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler -} - -func (c *Cluster) needMasterConnectionPooler() bool { - return c.needMasterConnectionPoolerWorker(&c.Spec) -} - -func (c *Cluster) needConnectionPooler() bool { - return c.needMasterConnectionPoolerWorker(&c.Spec) || c.needReplicaConnectionPoolerWorker(&c.Spec) -} - -// RolesConnectionPooler gives the list of roles which need connection pooler -func (c *Cluster) RolesConnectionPooler() []PostgresRole { - roles := make([]PostgresRole, 2) - - if c.needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, Master) - } - if c.needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, 
Replica) - } - return roles -} - -func (c *Cluster) needReplicaConnectionPooler() bool { - return c.needReplicaConnectionPoolerWorker(&c.Spec) -} - // Earlier arguments take priority func mergeContainers(containers ...[]v1.Container) ([]v1.Container, []string) { containerNameTaken := map[string]bool{} diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go index 8a218ce7f..cf24f808a 100644 --- a/pkg/connection_pooler/connection_pooler.go +++ b/pkg/connection_pooler/connection_pooler.go @@ -4,25 +4,35 @@ import ( "context" "fmt" + "github.com/r3labs/diff" + "github.com/sirupsen/logrus" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/pooler_interface" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "github.com/r3labs/diff" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/cluster" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" ) +const ( + connectionPoolerContainer = "connection-pooler" + pgPort = 5432 +) + // K8S objects that are belongs to a connection pooler type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - Name string + Deployment *appsv1.Deployment + Service *v1.Service + Name string + ClusterName string + Namespace string + logger *logrus.Entry // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. 
// In this case we will miss missing/require situation and a lookup function @@ -32,6 +42,92 @@ type ConnectionPoolerObjects struct { LookupFunction bool } +type SyncReason []string + +// no sync happened, empty value +var NoSync SyncReason = []string{} + +// PostgresRole describes role of the node +type PostgresRole string + +const ( + // Master role + Master PostgresRole = "master" + + // Replica role + Replica PostgresRole = "replica" +) + +type InstallFunction func(schema string, user string, role PostgresRole) error + +func (cp *ConnectionPoolerObjects) connectionPoolerName(role PostgresRole) string { + name := cp.ClusterName + "-pooler" + if role == Replica { + name = name + "-repl" + } + return name +} + +// isConnectionPoolerEnabled +func (cp *ConnectionPoolerObjects) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) +} + +func (cp *ConnectionPoolerObjects) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler +} + +//TODO: use spec from cluster +func (cp *ConnectionPoolerObjects) needMasterConnectionPooler() bool { + return cp.needMasterConnectionPoolerWorker(&c.Spec) +} + +func (cp *ConnectionPoolerObjects) needConnectionPooler() bool { + return cp.needMasterConnectionPoolerWorker(&c.Spec) || cp.needReplicaConnectionPoolerWorker(&c.Spec) +} + +// RolesConnectionPooler gives the list of roles which need connection pooler +func (cp *ConnectionPoolerObjects) RolesConnectionPooler() []PostgresRole { + roles := make([]PostgresRole, 0, 2) + + if cp.needMasterConnectionPoolerWorker(&c.Spec) { + roles = append(roles, Master) + } + if cp.needReplicaConnectionPoolerWorker(&c.Spec) { + roles = append(roles, Replica) + } + return roles +} + +func (cp *ConnectionPoolerObjects) needReplicaConnectionPooler() 
bool { + return cp.needReplicaConnectionPoolerWorker(&c.Spec) +} + +// Return connection pooler labels selector, which should from one point of view +// inherit most of the labels from the cluster itself, but at the same time +// have e.g. different `application` label, so that recreatePod operation will +// not interfere with it (it lists all the pods via labels, and if there would +// be no difference, it will recreate also pooler pods). +func (cp *ConnectionPoolerObjects) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { + connectionPoolerLabels := labels.Set(map[string]string{}) + + extraLabels := labels.Set(map[string]string{ + "connection-pooler-name": cp.connectionPoolerName(role), + "application": "db-connection-pooler", + "role": string(role), + "cluster-name": cp.ClusterName, + }) + + connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) + connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels) + + return &metav1.LabelSelector{ + MatchLabels: connectionPoolerLabels, + MatchExpressions: nil, + } +} + +//TODO: how to use cluster type! // Prepare the database for connection pooler to be used, i.e. install lookup // function (do it first, because it should be fast and if it didn't succeed, // it doesn't makes sense to create more K8S objects. 
At this moment we assume @@ -43,7 +139,7 @@ type ConnectionPoolerObjects struct { // have connectionpooler name in the cp object to have it immutable name // add these cp related functions to a new cp file // opConfig, cluster, and database name -func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.InstallFunction, role cluster.PostgresRole, c cluster.Cluster) (*ConnectionPoolerObjects, error) { +func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { var msg string c.setProcessName("creating connection pooler") @@ -58,19 +154,19 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.Install user = c.OpConfig.ConnectionPooler.User } - err := c.lookup(schema, user, role) + err := lookup(schema, user, role) if err != nil { msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } if c.ConnectionPooler[role] == nil { - c.ConnectionPooler = make(map[c.PostgresRole]*ConnectionPoolerObjects) + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) c.ConnectionPooler[role].Deployment = nil c.ConnectionPooler[role].Service = nil c.ConnectionPooler[role].LookupFunction = false } - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + deploymentSpec, err := c.ConnectionPooler[role].generateConnectionPoolerDeployment(&c.Spec, role) if err != nil { msg = "could not generate deployment for connection pooler: %v" return nil, fmt.Errorf(msg, err) @@ -87,7 +183,7 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.Install return nil, err } - serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) + serviceSpec := c.ConnectionPooler[role].generateConnectionPoolerService(&c.Spec, role) service, err := c.KubeClient. Services(serviceSpec.Namespace). 
Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) @@ -104,7 +200,184 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.Install return c.ConnectionPooler[role], nil } -func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) ( +//TODO: Figure out how can we go about for the opconfig required here! +// +// Generate pool size related environment variables. +// +// MAX_DB_CONN would specify the global maximum for connections to a target +// database. +// +// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. +// +// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when +// most of the queries coming through a connection pooler are from the same +// user to the same db). In case if we want to spin up more connection pooler +// instances, take this into account and maintain the same number of +// connections. +// +// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload +// have to wait for spinning up a new connections. +// +// RESERVE_SIZE is how many additional connections to allow for a pooler. 
+func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { + effectiveMode := util.Coalesce( + spec.ConnectionPooler.Mode, + c.OpConfig.ConnectionPooler.Mode) + + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + effectiveMaxDBConn := util.CoalesceInt32( + spec.ConnectionPooler.MaxDBConnections, + c.OpConfig.ConnectionPooler.MaxDBConnections) + + if effectiveMaxDBConn == nil { + effectiveMaxDBConn = k8sutil.Int32ToPointer( + constants.ConnectionPoolerMaxDBConnections) + } + + maxDBConn := *effectiveMaxDBConn / *numberOfInstances + + defaultSize := maxDBConn / 2 + minSize := defaultSize / 2 + reserveSize := minSize + + return []v1.EnvVar{ + { + Name: "CONNECTION_POOLER_PORT", + Value: fmt.Sprint(pgPort), + }, + { + Name: "CONNECTION_POOLER_MODE", + Value: effectiveMode, + }, + { + Name: "CONNECTION_POOLER_DEFAULT_SIZE", + Value: fmt.Sprint(defaultSize), + }, + { + Name: "CONNECTION_POOLER_MIN_SIZE", + Value: fmt.Sprint(minSize), + }, + { + Name: "CONNECTION_POOLER_RESERVE_SIZE", + Value: fmt.Sprint(reserveSize), + }, + { + Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", + Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), + }, + { + Name: "CONNECTION_POOLER_MAX_DB_CONN", + Value: fmt.Sprint(maxDBConn), + }, + } +} + +// TODO: Figure out how can we go about for the opconfig required here! 
+func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( + *v1.PodTemplateSpec, error) { + + gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) + resources, err := pooler_interface.pooler.pooler.generateResourceRequirements( + spec.ConnectionPooler.Resources, + cp.makeDefaultConnectionPoolerResources()) + + effectiveDockerImage := util.Coalesce( + spec.ConnectionPooler.DockerImage, + c.OpConfig.ConnectionPooler.Image) + + effectiveSchema := util.Coalesce( + spec.ConnectionPooler.Schema, + c.OpConfig.ConnectionPooler.Schema) + + if err != nil { + return nil, fmt.Errorf("could not generate resource requirements: %v", err) + } + + secretSelector := func(key string) *v1.SecretKeySelector { + effectiveUser := util.Coalesce( + spec.ConnectionPooler.User, + c.OpConfig.ConnectionPooler.User) + + return &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: pooler_interface.pooler.pooler.credentialSecretName(effectiveUser), + }, + Key: key, + } + } + + envVars := []v1.EnvVar{ + { + Name: "PGHOST", + Value: pooler_interface.pooler.pooler.serviceAddress(role), + }, + { + Name: "PGPORT", + Value: pooler_interface.pooler.pooler.servicePort(role), + }, + { + Name: "PGUSER", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("username"), + }, + }, + // the convention is to use the same schema name as + // connection pooler username + { + Name: "PGSCHEMA", + Value: effectiveSchema, + }, + { + Name: "PGPASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("password"), + }, + }, + } + + envVars = append(envVars, cp.getConnectionPoolerEnvVars(spec)...) 
+ + poolerContainer := v1.Container{ + Name: connectionPoolerContainer, + Image: effectiveDockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resources, + Ports: []v1.ContainerPort{ + { + ContainerPort: pgPort, + Protocol: v1.ProtocolTCP, + }, + }, + Env: envVars, + } + + podTemplate := &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, + Namespace: cp.Namespace, + Annotations: pooler_interface.pooler.pooler.generatePodAnnotations(spec), + }, + Spec: v1.PodSpec{ + ServiceAccountName: c.OpConfig.PodServiceAccountName, + TerminationGracePeriodSeconds: &gracePeriod, + Containers: []v1.Container{poolerContainer}, + // TODO: add tolerations to scheduler pooler on the same node + // as database + //Tolerations: *tolerationsSpec, + }, + } + + return podTemplate, nil +} + +//TODO: How to use opconfig from cluster type +func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( *appsv1.Deployment, error) { // there are two ways to enable connection pooler, either to specify a @@ -117,7 +390,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) + podTemplate, err := cp.generateConnectionPoolerPodTemplate(spec, role) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( @@ -127,7 +400,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid if *numberOfInstances < constants.ConnectionPoolerMinInstances { msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + cp.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) *numberOfInstances = 
constants.ConnectionPoolerMinInstances } @@ -138,9 +411,9 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Name: cp.connectionPoolerName(role), + Namespace: cp.Namespace, + Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -148,11 +421,11 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid // clean up this deployment, but there is a hope that this object // will be garbage collected if something went wrong and operator // didn't deleted it. - OwnerReferences: c.ownerReferences(), + OwnerReferences: pooler_interface.pooler.ownerReferences(), }, Spec: appsv1.DeploymentSpec{ Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(role), + Selector: cp.connectionPoolerLabelsSelector(role), Template: *podTemplate, }, } @@ -160,7 +433,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid return deployment, nil } -func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) *v1.Service { +func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { // there are two ways to enable connection pooler, either to specify a // connectionPooler section or enableConnectionPooler. In the second case @@ -175,22 +448,22 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1. 
serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{ { - Name: c.connectionPoolerName(role), + Name: cp.connectionPoolerName(role), Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, + TargetPort: intstr.IntOrString{StrVal: pooler_interface.pooler.servicePort(role)}, }, }, Type: v1.ServiceTypeClusterIP, Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(role), + "connection-pooler": cp.connectionPoolerName(role), }, } service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Name: cp.connectionPoolerName(role), + Namespace: cp.Namespace, + Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -198,7 +471,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1. // clean up this service, but there is a hope that this object will // be garbage collected if something went wrong and operator didn't // deleted it. - OwnerReferences: c.ownerReferences(), + OwnerReferences: pooler_interface.pooler.ownerReferences(), }, Spec: serviceSpec, } @@ -206,22 +479,23 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1. 
return service } -// delete connection pooler -func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role cluster.PostgresRole, c cluster.Cluster) (err error) { - c.setProcessName("deleting connection pooler") - c.logger.Debugln("deleting connection pooler") +// TODO: how to use KubeClient, opconfig, deleteSecret, credentialSecretName from cluster package +//delete connection pooler +func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (err error) { + //c.setProcessName("deleting connection pooler") + cp.logger.Debugln("deleting connection pooler") // Lack of connection pooler objects is not a fatal error, just log it if // it was present before in the manifest - if c.ConnectionPooler == nil { - c.logger.Infof("No connection pooler to delete") + if cp == nil { + cp.logger.Infof("No connection pooler to delete") return nil } // Clean up the deployment object. If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate var deployment *appsv1.Deployment - deployment = c.ConnectionPooler[role].Deployment + deployment = cp.Deployment policy := metav1.DeletePropagationForeground options := metav1.DeleteOptions{PropagationPolicy: &policy} @@ -232,64 +506,65 @@ func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role cluster.PostgresR // also deleted. err = c.KubeClient. - Deployments(c.Namespace). - Delete(context.TODO(), c.connectionPoolerName(role), options) + Deployments(cp.Namespace). 
+ Delete(context.TODO(), cp.connectionPoolerName(role), options) if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler deployment was already deleted") + cp.logger.Debugf("Connection pooler deployment was already deleted") } else if err != nil { return fmt.Errorf("could not delete deployment: %v", err) } - c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) + cp.logger.Infof("Connection pooler deployment %q has been deleted", cp.connectionPoolerName(role)) } // Repeat the same for the service object var service *v1.Service - service = c.ConnectionPooler[role].Service + service = cp.Service if service != nil { err = c.KubeClient. - Services(c.Namespace). - Delete(context.TODO(), c.connectionPoolerName(role), options) + Services(cp.Namespace). + Delete(context.TODO(), cp.connectionPoolerName(role), options) if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler service was already deleted") + cp.logger.Debugf("Connection pooler service was already deleted") } else if err != nil { return fmt.Errorf("could not delete service: %v", err) } - c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) + cp.logger.Infof("Connection pooler service %q has been deleted", cp.connectionPoolerName(role)) } // Repeat the same for the secret object - secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) + secretName := pooler_interface.pooler.credentialSecretName(c.OpConfig.ConnectionPooler.User) secret, err := c.KubeClient. - Secrets(c.Namespace). + Secrets(cp.Namespace). 
Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { - c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) + cp.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) } else { - if err = c.deleteSecret(secret.UID, *secret); err != nil { + if err = pooler_interface.pooler.deleteSecret(secret.UID, *secret); err != nil { return fmt.Errorf("could not delete pooler secret: %v", err) } } - c.ConnectionPooler = nil + cp.Deployment, cp.Service = nil, nil return nil } +//TODO: use KubeClient from cluster package // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. -func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { - c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { +func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { + //c.setProcessName("updating connection pooler") + if cp == nil || cp.Deployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } - patchData, err := specPatch(newDeployment.Spec) + patchData, err := pooler_interface.pooler.specPatch(newDeployment.Spec) if err != nil { return nil, fmt.Errorf("could not form patch for the deployment: %v", err) } @@ -298,9 +573,9 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymen // worker at one time will try to update it chances of conflicts are // minimal. deployment, err := c.KubeClient. 
- Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + Deployments(cp.Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler[role].Deployment.Name, + cp.Deployment.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, @@ -309,21 +584,22 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymen return nil, fmt.Errorf("could not patch deployment: %v", err) } - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment return deployment, nil } +//TODO use Kubeclient //updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { - c.logger.Debugf("updating connection pooler annotations") - patchData, err := metaAnnotationsPatch(annotations) +func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { + cp.logger.Debugf("updating connection pooler annotations") + patchData, err := pooler_interface.pooler.metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) } - result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + result, err := c.KubeClient.Deployments(cp.Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler[role].Deployment.Name, + cp.Deployment.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}, @@ -335,18 +611,16 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations } -//sync connection pooler - // Test if two connection pooler configuration needs to be synced. For simplicity // compare not the actual K8S objects, but the configuration itself and request // sync if there is any difference. 
-func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, c cluster.Cluster) (sync bool, reasons []string) { +func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { reasons = []string{} sync = false changelog, err := diff.Diff(oldSpec, newSpec) if err != nil { - c.logger.Infof("Cannot get diff, do not do anything, %+v", err) + cp.logger.Infof("Cannot get diff, do not do anything, %+v", err) return false, reasons } @@ -363,9 +637,10 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpe return sync, reasons } +//TODO use opConfig from cluster package // Check if we need to synchronize connection pooler deployment due to new // defaults, that are different from what we see in the DeploymentSpec -func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment, c cluster.Cluster) (sync bool, reasons []string) { +func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { reasons = []string{} sync = false @@ -396,14 +671,14 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 reasons = append(reasons, msg) } - expectedResources, err := generateResourceRequirements(spec.Resources, - c.makeDefaultConnectionPoolerResources()) + expectedResources, err := pooler_interface.pooler.generateResourceRequirements(spec.Resources, + cp.makeDefaultConnectionPoolerResources()) // An error to generate expected resources means something is not quite // right, but for the purpose of robustness do not panic here, just report // and ignore resources comparison (in the worst case there will be no // updates for new resource values). 
- if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + if err == nil && pooler_interface.pooler.syncResources(&poolerContainer.Resources, expectedResources) { sync = true msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", poolerContainer.Resources, expectedResources) @@ -411,14 +686,14 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 } if err != nil { - c.logger.Warningf("Cannot generate expected resources, %v", err) + cp.logger.Warningf("Cannot generate expected resources, %v", err) } for _, env := range poolerContainer.Env { if spec.User == "" && env.Name == "PGUSER" { ref := env.ValueFrom.SecretKeyRef.LocalObjectReference - if ref.Name != c.credentialSecretName(config.User) { + if ref.Name != pooler_interface.pooler.credentialSecretName(config.User) { sync = true msg := fmt.Sprintf("pooler user is different (having %s, required %s)", ref.Name, config.User) @@ -437,7 +712,29 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 return sync, reasons } -func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup cluster.InstallFunction, c cluster.Cluster) (SyncReason, error) { +//TODO use OpConfig from cluster package +// Generate default resource section for connection pooler deployment, to be +// used if nothing custom is specified in the manifest +func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1.Resources { + config := c.OpConfig + + defaultRequests := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, + } + defaultLimits := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, + } + + return acidv1.Resources{ + ResourceRequests: defaultRequests, + 
ResourceLimits: defaultLimits, + } +} + +//TODO use opConfig +func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) (SyncReason, error) { var reason SyncReason var err error @@ -445,18 +742,17 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 // Check and perform the sync requirements for each of the roles. for _, role := range [2]PostgresRole{Master, Replica} { - if role == Master { - newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + if role == cluster.Master { + newNeedConnectionPooler = cp.needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = cp.needMasterConnectionPoolerWorker(&oldSpec.Spec) } else { - newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + newNeedConnectionPooler = cp.needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = cp.needReplicaConnectionPoolerWorker(&oldSpec.Spec) } - if c.ConnectionPooler == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - c.ConnectionPooler[role].Deployment = nil - c.ConnectionPooler[role].Service = nil - c.ConnectionPooler[role].LookupFunction = false + if cp == nil { + cp.Deployment = nil + cp.Service = nil + cp.LookupFunction = false } if newNeedConnectionPooler { @@ -465,11 +761,11 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 // since it could happen that there is no difference in specs, and all // the resources are remembered, but the deployment was manually deleted // in between - c.logger.Debug("syncing connection pooler for the role %v", role) + cp.logger.Debug("syncing connection pooler for the role %v", role) // in this case also do not forget to install lookup function as for // creating cluster 
- if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { + if !oldNeedConnectionPooler || !cp.LookupFunction { newConnectionPooler := newSpec.Spec.ConnectionPooler specSchema := "" @@ -493,54 +789,36 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 } } - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) + if reason, err = cp.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { + cp.logger.Errorf("could not sync connection pooler: %v", err) return reason, err } } if oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { + if cp != nil && + (cp.Deployment != nil || + cp.Service != nil) { - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if err = cp.deleteConnectionPooler(role); err != nil { + cp.logger.Warningf("could not remove connection pooler: %v", err) } } - if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil + if cp != nil && cp.Deployment == nil && cp.Service == nil { + cp = nil } } if !oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources if not empty - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { + if cp != nil && + (cp.Deployment != nil || + cp.Service != nil) { - 
if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if err = cp.deleteConnectionPooler(role); err != nil { + cp.logger.Warningf("could not remove connection pooler: %v", err) } - } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil } } } @@ -548,22 +826,23 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 return reason, nil } +//TODO use Kubeclient, AnnotationsToPropagate from cluster package // Synchronize connection pooler resources. Effectively we're interested only in // synchronizing the corresponding deployment, but in case of deployment or // service is missing, create it. After checking, also remember an object for // the future references. -func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role cluster.PostgresRole, c cluster.Cluster) ( +func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( SyncReason, error) { deployment, err := c.KubeClient. - Deployments(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + Deployments(cp.Namespace). 
+ Get(context.TODO(), cp.connectionPoolerName(role), metav1.GetOptions{}) if err != nil && k8sutil.ResourceNotFound(err) { msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) + cp.logger.Warningf(msg, cp.connectionPoolerName(role)) - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + deploymentSpec, err := cp.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { msg = "could not generate deployment for connection pooler: %v" return NoSync, fmt.Errorf(msg, err) @@ -576,12 +855,12 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * if err != nil { return NoSync, err } - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler @@ -600,24 +879,24 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * newConnectionPooler = &acidv1.ConnectionPooler{} } - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + cp.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + specSync, specReason := cp.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + defaultsSync, defaultsReason := cp.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) 
if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), reason) - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + cp.logger.Infof("Update connection pooler deployment %s, reason: %+v", + cp.connectionPoolerName(role), reason) + newDeploymentSpec, err := cp.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { msg := "could not generate deployment for connection pooler: %v" return reason, fmt.Errorf(msg, err) } - oldDeploymentSpec := c.ConnectionPooler[role].Deployment + oldDeploymentSpec := cp.Deployment - deployment, err := c.updateConnectionPoolerDeployment( + deployment, err := cp.updateConnectionPoolerDeployment( oldDeploymentSpec, newDeploymentSpec, role) @@ -625,26 +904,26 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * if err != nil { return reason, err } - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment return reason, nil } } - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) + newAnnotations := pooler_interface.pooler.AnnotationsToPropagate(cp.Deployment.Annotations) if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations, role) + cp.updateConnectionPoolerAnnotations(newAnnotations, role) } service, err := c.KubeClient. - Services(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + Services(cp.Namespace). 
+ Get(context.TODO(), cp.connectionPoolerName(role), metav1.GetOptions{}) if err != nil && k8sutil.ResourceNotFound(err) { msg := "Service %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) + cp.logger.Warningf(msg, cp.connectionPoolerName(role)) - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) + serviceSpec := cp.generateConnectionPoolerService(&newSpec.Spec, role) service, err := c.KubeClient. Services(serviceSpec.Namespace). Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) @@ -652,14 +931,14 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * if err != nil { return NoSync, err } - c.ConnectionPooler[role].Service = service + cp.Service = service } else if err != nil { msg := "could not get connection pooler service to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler[role].Service = service + cp.Service = service } return NoSync, nil diff --git a/pkg/pooler_interface/pooler_interface.go b/pkg/pooler_interface/pooler_interface.go new file mode 100644 index 000000000..91f874778 --- /dev/null +++ b/pkg/pooler_interface/pooler_interface.go @@ -0,0 +1,18 @@ +package pooler_interface + +//functions of cluster package used in connection_pooler package +type pooler interface { + (c *Cluster) credentialSecretName(username string) string + (c *Cluster) serviceAddress(role PostgresRole) string + (c *Cluster) servicePort(role PostgresRole) string + generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) + (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string + (c *Cluster) ownerReferences() []metav1.OwnerReference + (c *Cluster) credentialSecretName(username string) + (c *Cluster) deleteSecrets() error + specPatch(spec interface{}) ([]byte, error) 
+ metaAnnotationsPatch(annotations map[string]string) ([]byte, error) + generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) + syncResources(a, b *v1.ResourceRequirements) bool + (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string +} From 289b2098c300f96be32a55101a9c8bfeef961c4f Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 7 Oct 2020 10:20:15 +0200 Subject: [PATCH 04/40] place config in a common package --- pkg/cluster/cluster.go | 18 ++------- pkg/connection_pooler/connection_pooler.go | 43 +++++++++------------- pkg/resources/resources.go | 20 ++++++++++ 3 files changed, 41 insertions(+), 40 deletions(-) create mode 100644 pkg/resources/resources.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 39434b1b6..3c1f15feb 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -18,7 +18,6 @@ import ( policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/tools/reference" @@ -27,16 +26,14 @@ import ( "github.com/zalando/postgres-operator/pkg/connection_pooler" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" + "github.com/zalando/postgres-operator/pkg/resources" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" - "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" - - rbacv1 "k8s.io/api/rbac/v1" ) var ( @@ -46,15 +43,6 @@ var ( patroniObjectSuffixes = []string{"config", "failover", "sync"} ) -// Config 
contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. -type Config struct { - OpConfig config.Config - RestConfig *rest.Config - InfrastructureRoles map[string]spec.PgUser // inherited from the controller - PodServiceAccount *v1.ServiceAccount - PodServiceAccountRoleBinding *rbacv1.RoleBinding -} - type kubeResources struct { Services map[PostgresRole]*v1.Service Endpoints map[PostgresRole]*v1.Endpoints @@ -69,7 +57,7 @@ type kubeResources struct { type Cluster struct { kubeResources acidv1.Postgresql - Config + resources.Config logger *logrus.Entry eventRecorder record.EventRecorder patroni patroni.Interface @@ -99,7 +87,7 @@ type compareStatefulsetResult struct { } // New creates a new cluster. This function should be called from a controller. -func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { +func New(cfg resources.Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { deletePropagationPolicy := metav1.DeletePropagationOrphan podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) { diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go index cf24f808a..6ff7b4c61 100644 --- a/pkg/connection_pooler/connection_pooler.go +++ b/pkg/connection_pooler/connection_pooler.go @@ -7,7 +7,7 @@ import ( "github.com/r3labs/diff" "github.com/sirupsen/logrus" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/pooler_interface" + "github.com/zalando/postgres-operator/pkg/cluster" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + 
"github.com/zalando/postgres-operator/pkg/resources" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" @@ -146,12 +147,12 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction schema := c.Spec.ConnectionPooler.Schema if schema == "" { - schema = c.OpConfig.ConnectionPooler.Schema + schema = resources.OpConfig.ConnectionPooler.Schema } user := c.Spec.ConnectionPooler.User if user == "" { - user = c.OpConfig.ConnectionPooler.User + user = resources.OpConfig.ConnectionPooler.User } err := lookup(schema, user, role) @@ -200,7 +201,6 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction return c.ConnectionPooler[role], nil } -//TODO: Figure out how can we go about for the opconfig required here! // // Generate pool size related environment variables. // @@ -222,18 +222,18 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { effectiveMode := util.Coalesce( spec.ConnectionPooler.Mode, - c.OpConfig.ConnectionPooler.Mode) + resources.OpConfig.ConnectionPooler.Mode) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, + resources.OpConfig.ConnectionPooler.NumberOfInstances, k8sutil.Int32ToPointer(1)) } effectiveMaxDBConn := util.CoalesceInt32( spec.ConnectionPooler.MaxDBConnections, - c.OpConfig.ConnectionPooler.MaxDBConnections) + resources.OpConfig.ConnectionPooler.MaxDBConnections) if effectiveMaxDBConn == nil { effectiveMaxDBConn = k8sutil.Int32ToPointer( @@ -278,22 +278,21 @@ func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.Postg } } -// TODO: Figure out how can we go about for the opconfig required here! 
func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( *v1.PodTemplateSpec, error) { - gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) + gracePeriod := int64(resources.OpConfig.PodTerminateGracePeriod.Seconds()) resources, err := pooler_interface.pooler.pooler.generateResourceRequirements( spec.ConnectionPooler.Resources, cp.makeDefaultConnectionPoolerResources()) effectiveDockerImage := util.Coalesce( spec.ConnectionPooler.DockerImage, - c.OpConfig.ConnectionPooler.Image) + resources.OpConfig.ConnectionPooler.Image) effectiveSchema := util.Coalesce( spec.ConnectionPooler.Schema, - c.OpConfig.ConnectionPooler.Schema) + resources.OpConfig.ConnectionPooler.Schema) if err != nil { return nil, fmt.Errorf("could not generate resource requirements: %v", err) @@ -302,7 +301,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *aci secretSelector := func(key string) *v1.SecretKeySelector { effectiveUser := util.Coalesce( spec.ConnectionPooler.User, - c.OpConfig.ConnectionPooler.User) + resources.OpConfig.ConnectionPooler.User) return &v1.SecretKeySelector{ LocalObjectReference: v1.LocalObjectReference{ @@ -364,7 +363,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *aci Annotations: pooler_interface.pooler.pooler.generatePodAnnotations(spec), }, Spec: v1.PodSpec{ - ServiceAccountName: c.OpConfig.PodServiceAccountName, + ServiceAccountName: resources.OpConfig.PodServiceAccountName, TerminationGracePeriodSeconds: &gracePeriod, Containers: []v1.Container{poolerContainer}, // TODO: add tolerations to scheduler pooler on the same node @@ -376,7 +375,6 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *aci return podTemplate, nil } -//TODO: How to use opconfig from cluster type func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( 
*appsv1.Deployment, error) { @@ -394,7 +392,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, + resources.OpConfig.ConnectionPooler.NumberOfInstances, k8sutil.Int32ToPointer(1)) } @@ -537,7 +535,7 @@ func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (er cp.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) } // Repeat the same for the secret object - secretName := pooler_interface.pooler.credentialSecretName(c.OpConfig.ConnectionPooler.User) + secretName := pooler_interface.pooler.credentialSecretName(resources.OpConfig.ConnectionPooler.User) secret, err := c.KubeClient. Secrets(cp.Namespace). @@ -555,7 +553,6 @@ func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (er return nil } -//TODO: use KubeClient from cluster package // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. 
func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { @@ -589,7 +586,6 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymen return deployment, nil } -//TODO use Kubeclient //updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { cp.logger.Debugf("updating connection pooler annotations") @@ -637,7 +633,6 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpe return sync, reasons } -//TODO use opConfig from cluster package // Check if we need to synchronize connection pooler deployment due to new // defaults, that are different from what we see in the DeploymentSpec func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { @@ -645,7 +640,7 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 reasons = []string{} sync = false - config := c.OpConfig.ConnectionPooler + config := resources.OpConfig.ConnectionPooler podTemplate := deployment.Spec.Template poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] @@ -712,11 +707,10 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 return sync, reasons } -//TODO use OpConfig from cluster package // Generate default resource section for connection pooler deployment, to be // used if nothing custom is specified in the manifest func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := c.OpConfig + config := resources.OpConfig defaultRequests := acidv1.ResourceDescription{ CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, @@ 
-733,7 +727,6 @@ func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1. } } -//TODO use opConfig func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) (SyncReason, error) { var reason SyncReason @@ -778,11 +771,11 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 schema := util.Coalesce( specSchema, - c.OpConfig.ConnectionPooler.Schema) + resources.OpConfig.ConnectionPooler.Schema) user := util.Coalesce( specUser, - c.OpConfig.ConnectionPooler.User) + resources.OpConfig.ConnectionPooler.User) if err = lookup(schema, user, role); err != nil { return NoSync, err diff --git a/pkg/resources/resources.go b/pkg/resources/resources.go new file mode 100644 index 000000000..fc43018c4 --- /dev/null +++ b/pkg/resources/resources.go @@ -0,0 +1,20 @@ +package resources + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + + "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/util/config" + + rbacv1 "k8s.io/api/rbac/v1" +) + +// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. 
+type Config struct { + OpConfig config.Config + RestConfig *rest.Config + InfrastructureRoles map[string]spec.PgUser // inherited from the controller + PodServiceAccount *v1.ServiceAccount + PodServiceAccountRoleBinding *rbacv1.RoleBinding +} From 403bbb6c87e780d82e121560ed95f40e5cef1eb9 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Fri, 28 Aug 2020 13:25:46 +0200 Subject: [PATCH 05/40] Add pooler for replica --- manifests/complete-postgres-manifest.yaml | 5 +- pkg/apis/acid.zalan.do/v1/crds.go | 3 ++ pkg/apis/acid.zalan.do/v1/postgresql_type.go | 5 +- pkg/cluster/k8sres.go | 50 +++++++++++++++----- 4 files changed, 47 insertions(+), 16 deletions(-) diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 79d1251e6..0bc186b36 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -1,7 +1,7 @@ apiVersion: "acid.zalan.do/v1" kind: postgresql metadata: - name: acid-test-cluster + name: acid-test-cluster2 # labels: # environment: demo # annotations: @@ -18,7 +18,8 @@ spec: - createdb enableMasterLoadBalancer: false enableReplicaLoadBalancer: false -# enableConnectionPooler: true # not needed when connectionPooler section is present (see below) + #enableConnectionPooler: true # not needed when connectionPooler section is present (see below) + enableReplicaConnectionPooler: true # set to enable connectionPooler for replica endpoints allowedSourceRanges: # load balancers' source ranges for both master and replica services - 127.0.0.1/32 databases: diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 2cfc28856..a2d862ee3 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -262,6 +262,9 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "enableConnectionPooler": { Type: "boolean", }, + "enableReplicaConnectionPooler": { + Type: "boolean", + }, "enableLogicalBackup": { 
Type: "boolean", }, diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 499a4cfda..a3dc490b5 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -29,8 +29,9 @@ type PostgresSpec struct { Patroni `json:"patroni,omitempty"` Resources `json:"resources,omitempty"` - EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` - ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` + EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` + EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"` + ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` TeamID string `json:"teamId"` DockerImage string `json:"dockerImage,omitempty"` diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index fef202538..fb9843289 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2110,9 +2110,13 @@ func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.Env return []v1.EnvVar{ { - Name: "CONNECTION_POOLER_PORT", + Name: "CONNECTION_POOLER_MASTER_PORT", Value: fmt.Sprint(pgPort), }, + { + Name: "CONNECTION_POOLER_REPLICA_PORT", + Value: fmt.Sprint(5433), + }, { Name: "CONNECTION_POOLER_MODE", Value: effectiveMode, @@ -2329,19 +2333,41 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1 if spec.ConnectionPooler == nil { spec.ConnectionPooler = &acidv1.ConnectionPooler{} } + var serviceSpec = v1.ServiceSpec{} - serviceSpec := v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName(), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, + if *spec.EnableReplicaConnectionPooler == false { + serviceSpec = v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName(), + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, + }, }, 
- }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(), - }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(), + }, + } + } else { + serviceSpec = v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName(), + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, + }, + { + Name: c.connectionPoolerName() + "-repl", + Port: 5433, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(Replica)}, + }, + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(), + }, + } } service := &v1.Service{ From 75ea994d616b696a7256277e2046228b9e373349 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Fri, 28 Aug 2020 14:55:49 +0200 Subject: [PATCH 06/40] Add new pooler service for replica --- manifests/complete-postgres-manifest.yaml | 6 +- pkg/cluster/cluster.go | 5 +- pkg/cluster/k8sres.go | 84 +++++++++++++---------- pkg/cluster/resources.go | 22 +++++- 4 files changed, 72 insertions(+), 45 deletions(-) diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 0bc186b36..42b7c13a4 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -1,7 +1,7 @@ apiVersion: "acid.zalan.do/v1" kind: postgresql metadata: - name: acid-test-cluster2 + name: acid-test-cluster # labels: # environment: demo # annotations: @@ -18,8 +18,8 @@ spec: - createdb enableMasterLoadBalancer: false enableReplicaLoadBalancer: false - #enableConnectionPooler: true # not needed when connectionPooler section is present (see below) - enableReplicaConnectionPooler: true # set to enable connectionPooler for replica endpoints + enableConnectionPooler: true # not needed when connectionPooler section is present (see below) + #enableReplicaConnectionPooler: true # set to enable connectionPooler for 
replica endpoints allowedSourceRanges: # load balancers' source ranges for both master and replica services - 127.0.0.1/32 databases: diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 9b8b51eb0..71752886c 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -55,8 +55,9 @@ type Config struct { // K8S objects that are belongs to a connection pooler type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service + Deployment *appsv1.Deployment + Service *v1.Service + ReplService *v1.Service // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index fb9843289..aff3d62bf 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2113,10 +2113,6 @@ func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.Env Name: "CONNECTION_POOLER_MASTER_PORT", Value: fmt.Sprint(pgPort), }, - { - Name: "CONNECTION_POOLER_REPLICA_PORT", - Value: fmt.Sprint(5433), - }, { Name: "CONNECTION_POOLER_MODE", Value: effectiveMode, @@ -2333,41 +2329,19 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1 if spec.ConnectionPooler == nil { spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - var serviceSpec = v1.ServiceSpec{} - if *spec.EnableReplicaConnectionPooler == false { - serviceSpec = v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName(), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(), - }, - } - } else { - serviceSpec = v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName(), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, - }, - { - Name: c.connectionPoolerName() + "-repl", - Port: 
5433, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Replica)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(), + serviceSpec := v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName(), + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, }, - } + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler-repl": c.connectionPoolerName(), + }, } service := &v1.Service{ @@ -2390,6 +2364,42 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1 return service } +func (c *Cluster) generateReplicaConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service { + + replicaserviceSpec := v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName() + "-repl", + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(Replica)}, + }, + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler-repl": c.connectionPoolerName() + "-repl", + }, + } + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName() + "-repl", + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector().MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this service, but there is a hope that this object will + // be garbage collected if something went wrong and operator didn't + // deleted it. 
+ OwnerReferences: c.ownerReferences(), + }, + Spec: replicaserviceSpec, + } + + return service +} + func ensurePath(file string, defaultDir string, defaultFile string) string { if file == "" { return path.Join(defaultDir, defaultFile) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index a9d13c124..160e42548 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -152,11 +152,27 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo if err != nil { return nil, err } + if c.Spec.EnableReplicaConnectionPooler != nil && *c.Spec.EnableReplicaConnectionPooler == true { + replServiceSpec := c.generateReplicaConnectionPoolerService(&c.Spec) + replService, err := c.KubeClient. + Services(serviceSpec.Namespace). + Create(context.TODO(), replServiceSpec, metav1.CreateOptions{}) - c.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: deployment, - Service: service, + if err != nil { + return nil, err + } + c.ConnectionPooler = &ConnectionPoolerObjects{ + Deployment: deployment, + Service: service, + ReplService: replService, + } + } else { + c.ConnectionPooler = &ConnectionPoolerObjects{ + Deployment: deployment, + Service: service, + } } + c.logger.Debugf("created new connection pooler %q, uid: %q", util.NameFromMeta(deployment.ObjectMeta), deployment.UID) From c1e42a8d5a734d8a8f22f939d17b464b69822a4b Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 2 Sep 2020 13:46:36 +0200 Subject: [PATCH 07/40] Enable connection pooler for replica - Refactor code for connection pooler deployment and services - Refactor sync code for connection pooler - Rename EnableConnectionPooler to EnableMasterConnectionPooler - Update yamls and tests --- .../postgres-operator/crds/postgresqls.yaml | 4 +- docs/reference/cluster_manifest.md | 4 +- docs/user.md | 4 +- e2e/tests/test_e2e.py | 4 +- manifests/complete-postgres-manifest.yaml | 4 +- manifests/postgresql.crd.yaml | 4 +- pkg/apis/acid.zalan.do/v1/crds.go | 2 +- 
pkg/apis/acid.zalan.do/v1/postgresql_type.go | 2 +- pkg/cluster/cluster.go | 7 +- pkg/cluster/cluster_test.go | 2 +- pkg/cluster/database.go | 2 +- pkg/cluster/k8sres.go | 99 +++++++++--- pkg/cluster/k8sres_test.go | 10 +- pkg/cluster/resources.go | 90 ++++++----- pkg/cluster/resources_test.go | 10 +- pkg/cluster/sync.go | 151 +++++++++++------- pkg/cluster/sync_test.go | 10 +- pkg/cluster/util.go | 13 +- ui/operator_ui/main.py | 25 ++- 19 files changed, 291 insertions(+), 156 deletions(-) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 0d444e568..419df1a81 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -185,8 +185,10 @@ spec: # Note: usernames specified here as database owners must be declared in the users key of the spec key. dockerImage: type: string - enableConnectionPooler: + enableMasterConnectionPooler: type: boolean + enableReplicaConnectionPooler: + type: boolean enableLogicalBackup: type: boolean enableMasterLoadBalancer: diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index 70ab14855..71e44d4d4 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -150,7 +150,7 @@ These parameters are grouped directly under the `spec` key in the manifest. is `false`, then no volume will be mounted no matter how operator was configured (so you can override the operator configuration). Optional. -* **enableConnectionPooler** +* **enableMasterConnectionPooler** Tells the operator to create a connection pooler with a database. If this field is true, a connection pooler deployment will be created even if `connectionPooler` section is empty. Optional, not set by default. @@ -397,7 +397,7 @@ CPU and memory limits for the sidecar container. Parameters are grouped under the `connectionPooler` top-level key and specify configuration for connection pooler. 
If this section is not empty, a connection -pooler will be created for a database even if `enableConnectionPooler` is not +pooler will be created for a database even if `enableMasterConnectionPooler` is not present. * **numberOfInstances** diff --git a/docs/user.md b/docs/user.md index a4b1424b8..a63017655 100644 --- a/docs/user.md +++ b/docs/user.md @@ -736,7 +736,7 @@ manifest: ```yaml spec: - enableConnectionPooler: true + enableMasterConnectionPooler: true ``` This will tell the operator to create a connection pooler with default @@ -772,7 +772,7 @@ spec: memory: 100Mi ``` -The `enableConnectionPooler` flag is not required when the `connectionPooler` +The `enableMasterConnectionPooler` flag is not required when the `connectionPooler` section is present in the manifest. But, it can be used to disable/remove the pooler while keeping its configuration. diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 550d3ced8..a0870202f 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -104,7 +104,7 @@ def test_enable_disable_connection_pooler(self): 'postgresqls', 'acid-minimal-cluster', { 'spec': { - 'enableConnectionPooler': True, + 'enableMasterConnectionPooler': True, } }) k8s.wait_for_pod_start(pod_selector) @@ -146,7 +146,7 @@ def test_enable_disable_connection_pooler(self): 'postgresqls', 'acid-minimal-cluster', { 'spec': { - 'enableConnectionPooler': False, + 'enableMasterConnectionPooler': False, } }) k8s.wait_for_pods_to_stop(pod_selector) diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 42b7c13a4..4b6239bdd 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -18,8 +18,8 @@ spec: - createdb enableMasterLoadBalancer: false enableReplicaLoadBalancer: false - enableConnectionPooler: true # not needed when connectionPooler section is present (see below) - #enableReplicaConnectionPooler: true # set to enable connectionPooler for 
replica endpoints + enableMasterConnectionPooler: true # not needed when connectionPooler section is present (see below) + enableReplicaConnectionPooler: true # set to enable connectionPooler for replica endpoints allowedSourceRanges: # load balancers' source ranges for both master and replica services - 127.0.0.1/32 databases: diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 97b72a8ca..7d0e40cc0 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -181,8 +181,10 @@ spec: # Note: usernames specified here as database owners must be declared in the users key of the spec key. dockerImage: type: string - enableConnectionPooler: + enableMasterConnectionPooler: type: boolean + enableReplicaConnectionPooler: + type: boolean enableLogicalBackup: type: boolean enableMasterLoadBalancer: diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index a2d862ee3..be16062a9 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -259,7 +259,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "dockerImage": { Type: "string", }, - "enableConnectionPooler": { + "enableMasterConnectionPooler": { Type: "boolean", }, "enableReplicaConnectionPooler": { diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index a3dc490b5..02f66fbf1 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -29,7 +29,7 @@ type PostgresSpec struct { Patroni `json:"patroni,omitempty"` Resources `json:"resources,omitempty"` - EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` + EnableMasterConnectionPooler *bool `json:"enableMasterConnectionPooler,omitempty"` EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"` ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` diff --git 
a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 71752886c..4172e6d7e 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -55,9 +55,10 @@ type Config struct { // K8S objects that are belongs to a connection pooler type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - ReplService *v1.Service + Deployment *appsv1.Deployment + ReplDeployment *appsv1.Deployment + Service *v1.Service + ReplService *v1.Service // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 1f6510e65..f7c186d0c 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -719,7 +719,7 @@ func TestInitSystemUsers(t *testing.T) { } // cluster with connection pooler - cl.Spec.EnableConnectionPooler = boolToPointer(true) + cl.Spec.EnableMasterConnectionPooler = boolToPointer(true) cl.initSystemUsers() if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist { t.Errorf("%s, connection pooler user is not present", testName) diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index 75e2d2097..1a38bd41d 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -462,7 +462,7 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi } // Creates a connection pool credentials lookup function in every database to -// perform remote authentification. +// perform remote authentication. 
func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { var stmtBytes bytes.Buffer c.logger.Info("Installing lookup function") diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index aff3d62bf..3ce9cd822 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -75,8 +75,12 @@ func (c *Cluster) statefulSetName() string { return c.Name } -func (c *Cluster) connectionPoolerName() string { - return c.Name + "-pooler" +func (c *Cluster) connectionPoolerName(role PostgresRole) string { + name := c.Name + "-pooler" + if role == Replica { + name = name + "-repl" + } + return name } func (c *Cluster) endpointName(role PostgresRole) string { @@ -2110,7 +2114,7 @@ func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.Env return []v1.EnvVar{ { - Name: "CONNECTION_POOLER_MASTER_PORT", + Name: "CONNECTION_POOLER_PORT", Value: fmt.Sprint(pgPort), }, { @@ -2140,7 +2144,7 @@ func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.Env } } -func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) ( +func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( *v1.PodTemplateSpec, error) { gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) @@ -2176,11 +2180,11 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) envVars := []v1.EnvVar{ { Name: "PGHOST", - Value: c.serviceAddress(Master), + Value: c.serviceAddress(role), }, { Name: "PGPORT", - Value: c.servicePort(Master), + Value: c.servicePort(role), }, { Name: "PGUSER", @@ -2262,7 +2266,7 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference { } } -func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) ( +func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( *appsv1.Deployment, error) { // there are two ways to enable connection pooler, either to specify a 
@@ -2275,7 +2279,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec) + podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( @@ -2294,9 +2298,12 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) return nil, err } + var name string + name = c.connectionPoolerName(role) + deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(), + Name: name, Namespace: c.Namespace, Labels: c.connectionPoolerLabelsSelector().MatchLabels, Annotations: map[string]string{}, @@ -2318,7 +2325,53 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) return deployment, nil } -func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service { +/* func (c *Cluster) generateReplicaConnectionPoolerDeployment(spec *acidv1.PostgresSpec) ( + *appsv1.Deployment, error) { + + podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, Replica) + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + if *numberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "Adjusted number of connection pooler instances from %d to %d" + c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + + *numberOfInstances = constants.ConnectionPoolerMinInstances + } + + if err != nil { + return nil, err + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName() + "-repl", + Namespace: c.Namespace, + Labels: 
c.connectionPoolerLabelsSelector().MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this deployment, but there is a hope that this object + // will be garbage collected if something went wrong and operator + // didn't deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: numberOfInstances, + Selector: c.connectionPoolerLabelsSelector(), + Template: *podTemplate, + }, + } + + return deployment, nil +} */ + +func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { // there are two ways to enable connection pooler, either to specify a // connectionPooler section or enableConnectionPooler. In the second case @@ -2329,24 +2382,26 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1 if spec.ConnectionPooler == nil { spec.ConnectionPooler = &acidv1.ConnectionPooler{} } + name := c.connectionPoolerName(role) serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{ { - Name: c.connectionPoolerName(), + Name: name, Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, }, }, Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler-repl": c.connectionPoolerName(), - }, } - + if role == Replica { + serviceSpec.Selector = c.roleLabelsSet(false, Replica) + } else { + serviceSpec.Selector = map[string]string{"connection-pooler": name} + } service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(), + Name: name, Namespace: c.Namespace, Labels: c.connectionPoolerLabelsSelector().MatchLabels, Annotations: map[string]string{}, @@ -2364,7 +2419,7 @@ func (c *Cluster) generateConnectionPoolerService(spec 
*acidv1.PostgresSpec) *v1 return service } -func (c *Cluster) generateReplicaConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service { +/* func (c *Cluster) generateReplicaConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service { replicaserviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{ @@ -2374,10 +2429,8 @@ func (c *Cluster) generateReplicaConnectionPoolerService(spec *acidv1.PostgresSp TargetPort: intstr.IntOrString{StrVal: c.servicePort(Replica)}, }, }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler-repl": c.connectionPoolerName() + "-repl", - }, + Type: v1.ServiceTypeClusterIP, + Selector: c.roleLabelsSet(false, Replica), } service := &v1.Service{ @@ -2398,7 +2451,7 @@ func (c *Cluster) generateReplicaConnectionPoolerService(spec *acidv1.PostgresSp } return service -} +} */ func ensurePath(file string, defaultDir string, defaultFile string) string { if file == "" { diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index f44b071bb..c467d3545 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1090,7 +1090,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) { }, } for _, tt := range tests { - podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec) + podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec, Master) if err != tt.expected && err.Error() != tt.expected.Error() { t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", @@ -1192,7 +1192,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { }, } for _, tt := range tests { - deployment, err := tt.cluster.generateConnectionPoolerDeployment(tt.spec) + deployment, err := tt.cluster.generateConnectionPoolerDeployment(tt.spec, Master) if err != tt.expected && err.Error() != tt.expected.Error() { t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", @@ -1221,9 +1221,9 @@ func testServiceOwnwerReference(cluster *Cluster, service 
*v1.Service) error { func testServiceSelector(cluster *Cluster, service *v1.Service) error { selector := service.Spec.Selector - if selector["connection-pooler"] != cluster.connectionPoolerName() { + if selector["connection-pooler"] != cluster.connectionPoolerName(Master) { return fmt.Errorf("Selector is incorrect, got %s, expected %s", - selector["connection-pooler"], cluster.connectionPoolerName()) + selector["connection-pooler"], cluster.connectionPoolerName(Master)) } return nil @@ -1289,7 +1289,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { }, } for _, tt := range tests { - service := tt.cluster.generateConnectionPoolerService(tt.spec) + service := tt.cluster.generateConnectionPoolerService(tt.spec, Master) if err := tt.check(cluster, service); err != nil { t.Errorf("%s [%s]: Service spec is incorrect, %+v", diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 160e42548..84c0b14fe 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -126,56 +126,74 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } + if c.Spec.EnableMasterConnectionPooler != nil || c.ConnectionPooler != nil { + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Master) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). 
- Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - serviceSpec := c.generateConnectionPoolerService(&c.Spec) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). - Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + serviceSpec := c.generateConnectionPoolerService(&c.Spec, Master) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). + Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - if err != nil { - return nil, err + if err != nil { + return nil, err + } + c.ConnectionPooler = &ConnectionPoolerObjects{ + Deployment: deployment, + Service: service, + } + c.logger.Debugf("created new connection pooler %q, uid: %q", + util.NameFromMeta(deployment.ObjectMeta), deployment.UID) } + if c.Spec.EnableReplicaConnectionPooler != nil && *c.Spec.EnableReplicaConnectionPooler == true { - replServiceSpec := c.generateReplicaConnectionPoolerService(&c.Spec) + repldeploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Replica) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + repldeployment, err := c.KubeClient. + Deployments(repldeploymentSpec.Namespace). 
+ Create(context.TODO(), repldeploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + + replServiceSpec := c.generateConnectionPoolerService(&c.Spec, Replica) replService, err := c.KubeClient. - Services(serviceSpec.Namespace). + Services(replServiceSpec.Namespace). Create(context.TODO(), replServiceSpec, metav1.CreateOptions{}) if err != nil { return nil, err } c.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: deployment, - Service: service, - ReplService: replService, - } - } else { - c.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: deployment, - Service: service, + ReplDeployment: repldeployment, + ReplService: replService, } + c.logger.Debugf("created new connection pooler for replica %q, uid: %q", + util.NameFromMeta(repldeployment.ObjectMeta), repldeployment.UID) } - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - return c.ConnectionPooler, nil } @@ -192,7 +210,7 @@ func (c *Cluster) deleteConnectionPooler() (err error) { // Clean up the deployment object. 
If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate - deploymentName := c.connectionPoolerName() + deploymentName := c.connectionPoolerName(Master) deployment := c.ConnectionPooler.Deployment if deployment != nil { @@ -217,7 +235,7 @@ func (c *Cluster) deleteConnectionPooler() (err error) { // Repeat the same for the service object service := c.ConnectionPooler.Service - serviceName := c.connectionPoolerName() + serviceName := c.connectionPoolerName(Master) if service != nil { serviceName = service.Name diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index 9739cc354..ca7683160 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -97,7 +97,7 @@ func TestNeedConnectionPooler(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), + EnableMasterConnectionPooler: boolToPointer(true), } if !cluster.needConnectionPooler() { @@ -106,8 +106,8 @@ func TestNeedConnectionPooler(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(false), - ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableMasterConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, } if cluster.needConnectionPooler() { @@ -116,8 +116,8 @@ func TestNeedConnectionPooler(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableMasterConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, } if !cluster.needConnectionPooler() { diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index fef5b7b66..fb064f53e 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -922,34 +922,18 @@ func (c *Cluster) syncConnectionPooler(oldSpec, func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) ( SyncReason, error) { - deployment, err 
:= c.KubeClient. - Deployments(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName()) - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return NoSync, fmt.Errorf(msg, err) - } - - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - - c.ConnectionPooler.Deployment = deployment - } else if err != nil { + masterdeployment, err := c.checkAndCreateConnectionPoolerDeployment(Master, newSpec) + if err != nil { + msg := "could not get connection pooler deployment to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } + replicadeployment, err := c.checkAndCreateConnectionPoolerDeployment(Replica, newSpec) + if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler.Deployment = deployment + c.ConnectionPooler.Deployment = masterdeployment + c.ConnectionPooler.ReplDeployment = replicadeployment // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler @@ -968,32 +952,28 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql newConnectionPooler = &acidv1.ConnectionPooler{} } - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + c.logger.Infof("Old: %+v, New: %+v", oldConnectionPooler, newConnectionPooler) specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + defaultsSync, defaultsReason := 
c.needSyncConnectionPoolerDefaults(newConnectionPooler, masterdeployment) reason := append(specReason, defaultsReason...) if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(), reason) - - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec) + newmasterdeployment, err := c.UpdateConnectionPoolerDeploymentSub(Master, reason[:], newSpec) if err != nil { - msg := "could not generate deployment for connection pooler: %v" - return reason, fmt.Errorf(msg, err) + return reason, err } + c.ConnectionPooler.Deployment = newmasterdeployment + return reason, nil + } - oldDeploymentSpec := c.ConnectionPooler.Deployment - - deployment, err := c.updateConnectionPoolerDeployment( - oldDeploymentSpec, - newDeploymentSpec) - + defaultsSync, defaultsReason = c.needSyncConnectionPoolerDefaults(newConnectionPooler, replicadeployment) + reason = append(specReason, defaultsReason...) + if specSync || defaultsSync { + newreplicadeployment, err := c.UpdateConnectionPoolerDeploymentSub(Replica, reason[:], newSpec) if err != nil { return reason, err } - - c.ConnectionPooler.Deployment = deployment + c.ConnectionPooler.Deployment = newreplicadeployment return reason, nil } } @@ -1002,32 +982,95 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if newAnnotations != nil { c.updateConnectionPoolerAnnotations(newAnnotations) } + masterservice, err := c.checkAndCreateConnectionPoolerService(Master, newSpec) + if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } + replicaservice, err := c.checkAndCreateConnectionPoolerService(Replica, newSpec) + if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler.Service = masterservice + 
c.ConnectionPooler.ReplService = replicaservice + } + + return NoSync, nil +} + +func (c *Cluster) UpdateConnectionPoolerDeploymentSub(role PostgresRole, reason []string, newSpec *acidv1.Postgresql) (*appsv1.Deployment, error) { + + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg := "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + + oldDeploymentSpec := c.ConnectionPooler.Deployment + + deployment, err := c.updateConnectionPoolerDeployment( + oldDeploymentSpec, + newDeploymentSpec) + + if err != nil { + return nil, err + } + return deployment, nil +} + +func (c *Cluster) checkAndCreateConnectionPoolerDeployment(role PostgresRole, newSpec *acidv1.Postgresql) (*appsv1.Deployment, error) { + + deployment, err := c.KubeClient. + Deployments(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + return deployment, nil + } + return deployment, nil +} + +func (c *Cluster) checkAndCreateConnectionPoolerService(role PostgresRole, newSpec *acidv1.Postgresql) (*v1.Service, error) { service, err := c.KubeClient. Services(c.Namespace). 
- Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{}) + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) if err != nil && k8sutil.ResourceNotFound(err) { msg := "Service %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName()) + c.logger.Warningf(msg, c.connectionPoolerName(role)) - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec) + serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) service, err := c.KubeClient. Services(serviceSpec.Namespace). Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) if err != nil { - return NoSync, err + return nil, err } - c.ConnectionPooler.Service = service - } else if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler.Service = service + return service, nil } - - return NoSync, nil + return service, nil } diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index d9248ae33..eaffbf475 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -139,7 +139,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { }, newSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), + EnableMasterConnectionPooler: boolToPointer(true), }, }, cluster: clusterMissingObjects, @@ -232,14 +232,14 @@ func TestConnectionPoolerSynchronization(t *testing.T) { subTest: "there is no sync from nil to an empty spec", oldSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: nil, + EnableMasterConnectionPooler: boolToPointer(true), + ConnectionPooler: nil, }, }, newSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, + 
EnableMasterConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, }, }, cluster: clusterMock, diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 7559ce3d4..578eaf40e 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -424,7 +424,7 @@ func (c *Cluster) connectionPoolerLabelsSelector() *metav1.LabelSelector { connectionPoolerLabels := labels.Set(map[string]string{}) extraLabels := labels.Set(map[string]string{ - "connection-pooler": c.connectionPoolerName(), + "connection-pooler": c.connectionPoolerName(Master), "application": "db-connection-pooler", }) @@ -520,11 +520,16 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { } func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - if spec.EnableConnectionPooler == nil { + if spec.EnableMasterConnectionPooler != nil { + return *spec.EnableMasterConnectionPooler + } else if spec.EnableReplicaConnectionPooler != nil { + return *spec.EnableReplicaConnectionPooler + } else if spec.ConnectionPooler == nil { return spec.ConnectionPooler != nil - } else { - return *spec.EnableConnectionPooler } + // if the connectionPooler section is there, then we enable even though the + // flags are not there + return true } func (c *Cluster) needConnectionPooler() bool { diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index dc2450b9f..634c9f8d4 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -607,16 +607,27 @@ def update_postgresql(namespace: str, cluster: str): spec['volume'] = {'size': size} - if 'enableConnectionPooler' in postgresql['spec']: - cp = postgresql['spec']['enableConnectionPooler'] + if 'enableMasterConnectionPooler' in postgresql['spec']: + cp = postgresql['spec']['enableMasterConnectionPooler'] if not cp: - if 'enableConnectionPooler' in o['spec']: - del o['spec']['enableConnectionPooler'] + if 'enableMasterConnectionPooler' in o['spec']: + del o['spec']['enableMasterConnectionPooler'] else: - 
spec['enableConnectionPooler'] = True + spec['enableMasterConnectionPooler'] = True else: - if 'enableConnectionPooler' in o['spec']: - del o['spec']['enableConnectionPooler'] + if 'enableMasterConnectionPooler' in o['spec']: + del o['spec']['enableMasterConnectionPooler'] + + if 'enableReplicaConnectionPooler' in postgresql['spec']: + cp = postgresql['spec']['enableReplicaConnectionPooler'] + if not cp: + if 'enableReplicaConnectionPooler' in o['spec']: + del o['spec']['enableReplicaConnectionPooler'] + else: + spec['enableReplicaConnectionPooler'] = True + else: + if 'enableReplicaConnectionPooler' in o['spec']: + del o['spec']['enableReplicaConnectionPooler'] if 'enableReplicaLoadBalancer' in postgresql['spec']: rlb = postgresql['spec']['enableReplicaLoadBalancer'] From dd18c185e3b7fd79b1f1a6b765ba6c05ea709d0f Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 2 Sep 2020 17:17:56 +0200 Subject: [PATCH 08/40] Fix labels for the replica pods --- pkg/cluster/k8sres.go | 88 ++------------------------------------ pkg/cluster/k8sres_test.go | 4 +- pkg/cluster/util.go | 4 +- 3 files changed, 8 insertions(+), 88 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 3ce9cd822..6fb64e1ee 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2224,7 +2224,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, podTemplate := &v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: c.connectionPoolerLabelsSelector().MatchLabels, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, Namespace: c.Namespace, Annotations: c.generatePodAnnotations(spec), }, @@ -2305,7 +2305,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector().MatchLabels, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet 
object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -2317,7 +2317,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, }, Spec: appsv1.DeploymentSpec{ Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(), + Selector: c.connectionPoolerLabelsSelector(role), Template: *podTemplate, }, } @@ -2325,52 +2325,6 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, return deployment, nil } -/* func (c *Cluster) generateReplicaConnectionPoolerDeployment(spec *acidv1.PostgresSpec) ( - *appsv1.Deployment, error) { - - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, Replica) - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - if *numberOfInstances < constants.ConnectionPoolerMinInstances { - msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) - - *numberOfInstances = constants.ConnectionPoolerMinInstances - } - - if err != nil { - return nil, err - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName() + "-repl", - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector().MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this deployment, but there is a hope that this object - // will be garbage collected if something went wrong and operator - // didn't deleted it. 
- OwnerReferences: c.ownerReferences(), - }, - Spec: appsv1.DeploymentSpec{ - Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(), - Template: *podTemplate, - }, - } - - return deployment, nil -} */ - func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { // there are two ways to enable connection pooler, either to specify a @@ -2403,7 +2357,7 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector().MatchLabels, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -2419,40 +2373,6 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol return service } -/* func (c *Cluster) generateReplicaConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service { - - replicaserviceSpec := v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName() + "-repl", - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Replica)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: c.roleLabelsSet(false, Replica), - } - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName() + "-repl", - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector().MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this service, but there is a hope that this object will - // be garbage collected if something went wrong and operator didn't - // deleted it. 
- OwnerReferences: c.ownerReferences(), - }, - Spec: replicaserviceSpec, - } - - return service -} */ - func ensurePath(file string, defaultDir string, defaultFile string) string { if file == "" { return path.Join(defaultDir, defaultFile) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index c467d3545..4ed0fc57b 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -960,7 +960,7 @@ func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] - if poolerLabels != cluster.connectionPoolerLabelsSelector().MatchLabels["connection-pooler"] { + if poolerLabels != cluster.connectionPoolerLabelsSelector(Master).MatchLabels["connection-pooler"] { return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector().MatchLabels) } @@ -1118,7 +1118,7 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { labels := deployment.Spec.Selector.MatchLabels - expected := cluster.connectionPoolerLabelsSelector().MatchLabels + expected := cluster.connectionPoolerLabelsSelector(Master).MatchLabels if labels["connection-pooler"] != expected["connection-pooler"] { return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 578eaf40e..3f0dff0b1 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -420,11 +420,11 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector { // have e.g. different `application` label, so that recreatePod operation will // not interfere with it (it lists all the pods via labels, and if there would // be no difference, it will recreate also pooler pods). 
-func (c *Cluster) connectionPoolerLabelsSelector() *metav1.LabelSelector { +func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { connectionPoolerLabels := labels.Set(map[string]string{}) extraLabels := labels.Set(map[string]string{ - "connection-pooler": c.connectionPoolerName(Master), + "connection-pooler": c.connectionPoolerName(role), "application": "db-connection-pooler", }) From 470234022a82c701d9d4fd27d0611dbe70a012a3 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Thu, 3 Sep 2020 09:32:48 +0200 Subject: [PATCH 09/40] fix for labels selector --- pkg/cluster/k8sres.go | 8 +++----- pkg/cluster/k8sres_test.go | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 6fb64e1ee..9e87f9fbb 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2348,11 +2348,9 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol }, Type: v1.ServiceTypeClusterIP, } - if role == Replica { - serviceSpec.Selector = c.roleLabelsSet(false, Replica) - } else { - serviceSpec.Selector = map[string]string{"connection-pooler": name} - } + + serviceSpec.Selector = map[string]string{"connection-pooler": name} + service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 4ed0fc57b..58f811adc 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -962,7 +962,7 @@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { if poolerLabels != cluster.connectionPoolerLabelsSelector(Master).MatchLabels["connection-pooler"] { return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", - podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector().MatchLabels) + podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(Master).MatchLabels) } return nil From 99abcdcdeb11fb30e29e86c60a24e9ae2f11ea9f Mon Sep 17 00:00:00 2001 
From: Rafia Sabih Date: Thu, 3 Sep 2020 16:01:25 +0200 Subject: [PATCH 10/40] Assorted changes - Update deleteConnectionPooler to include role - Rename EnableMasterConnectionPooler back to original name for backward compatibility - other minor changes and code improvements --- charts/postgres-operator/crds/postgresqls.yaml | 2 +- docs/reference/cluster_manifest.md | 4 ++-- docs/user.md | 4 ++-- e2e/tests/test_e2e.py | 4 ++-- manifests/complete-postgres-manifest.yaml | 2 +- manifests/postgresql.crd.yaml | 2 +- pkg/apis/acid.zalan.do/v1/crds.go | 2 +- pkg/apis/acid.zalan.do/v1/postgresql_type.go | 2 +- pkg/cluster/cluster.go | 7 +++++-- pkg/cluster/cluster_test.go | 2 +- pkg/cluster/k8sres.go | 15 +++++---------- pkg/cluster/resources.go | 8 ++++---- pkg/cluster/resources_test.go | 12 ++++++------ pkg/cluster/sync.go | 13 ++++++++----- pkg/cluster/sync_test.go | 10 +++++----- pkg/cluster/util.go | 4 ++-- ui/operator_ui/main.py | 14 +++++++------- 17 files changed, 54 insertions(+), 53 deletions(-) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 419df1a81..11f8b972e 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -185,7 +185,7 @@ spec: # Note: usernames specified here as database owners must be declared in the users key of the spec key. dockerImage: type: string - enableMasterConnectionPooler: + enableConnectionPooler: type: boolean enableReplicaConnectionPooler: type: boolean diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index 71e44d4d4..70ab14855 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -150,7 +150,7 @@ These parameters are grouped directly under the `spec` key in the manifest. is `false`, then no volume will be mounted no matter how operator was configured (so you can override the operator configuration). Optional.
-* **enableMasterConnectionPooler** +* **enableConnectionPooler** Tells the operator to create a connection pooler with a database. If this field is true, a connection pooler deployment will be created even if `connectionPooler` section is empty. Optional, not set by default. @@ -397,7 +397,7 @@ CPU and memory limits for the sidecar container. Parameters are grouped under the `connectionPooler` top-level key and specify configuration for connection pooler. If this section is not empty, a connection -pooler will be created for a database even if `enableMasterConnectionPooler` is not +pooler will be created for a database even if `enableConnectionPooler` is not present. * **numberOfInstances** diff --git a/docs/user.md b/docs/user.md index a63017655..a4b1424b8 100644 --- a/docs/user.md +++ b/docs/user.md @@ -736,7 +736,7 @@ manifest: ```yaml spec: - enableMasterConnectionPooler: true + enableConnectionPooler: true ``` This will tell the operator to create a connection pooler with default @@ -772,7 +772,7 @@ spec: memory: 100Mi ``` -The `enableMasterConnectionPooler` flag is not required when the `connectionPooler` +The `enableConnectionPooler` flag is not required when the `connectionPooler` section is present in the manifest. But, it can be used to disable/remove the pooler while keeping its configuration. 
diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index a0870202f..550d3ced8 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -104,7 +104,7 @@ def test_enable_disable_connection_pooler(self): 'postgresqls', 'acid-minimal-cluster', { 'spec': { - 'enableMasterConnectionPooler': True, + 'enableConnectionPooler': True, } }) k8s.wait_for_pod_start(pod_selector) @@ -146,7 +146,7 @@ def test_enable_disable_connection_pooler(self): 'postgresqls', 'acid-minimal-cluster', { 'spec': { - 'enableMasterConnectionPooler': False, + 'enableConnectionPooler': False, } }) k8s.wait_for_pods_to_stop(pod_selector) diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 4b6239bdd..ec5398a9d 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -18,7 +18,7 @@ spec: - createdb enableMasterLoadBalancer: false enableReplicaLoadBalancer: false - enableMasterConnectionPooler: true # not needed when connectionPooler section is present (see below) + enableConnectionPooler: true # not needed when connectionPooler section is present (see below) enableReplicaConnectionPooler: true # set to enable connectionPooler for replica endpoints allowedSourceRanges: # load balancers' source ranges for both master and replica services - 127.0.0.1/32 diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 7d0e40cc0..9df76cc73 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -181,7 +181,7 @@ spec: # Note: usernames specified here as database owners must be declared in the users key of the spec key. 
dockerImage: type: string - enableMasterConnectionPooler: + enableConnectionPooler: type: boolean enableReplicaConnectionPooler: type: boolean diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index be16062a9..a2d862ee3 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -259,7 +259,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "dockerImage": { Type: "string", }, - "enableMasterConnectionPooler": { + "enableConnectionPooler": { Type: "boolean", }, "enableReplicaConnectionPooler": { diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 02f66fbf1..a3dc490b5 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -29,7 +29,7 @@ type PostgresSpec struct { Patroni `json:"patroni,omitempty"` Resources `json:"resources,omitempty"` - EnableMasterConnectionPooler *bool `json:"enableMasterConnectionPooler,omitempty"` + EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"` ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 4172e6d7e..dff96e47f 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -840,9 +840,12 @@ func (c *Cluster) Delete() { // Delete connection pooler objects anyway, even if it's not mentioned in the // manifest, just to not keep orphaned components in case if something went // wrong - if err := c.deleteConnectionPooler(); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + for _, role := range [2]PostgresRole{Master, Replica} { + if err := c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } } + } //NeedsRepair returns true if the cluster should be 
included in the repair scan (based on its in-memory status). diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index f7c186d0c..1f6510e65 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -719,7 +719,7 @@ func TestInitSystemUsers(t *testing.T) { } // cluster with connection pooler - cl.Spec.EnableMasterConnectionPooler = boolToPointer(true) + cl.Spec.EnableConnectionPooler = boolToPointer(true) cl.initSystemUsers() if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist { t.Errorf("%s, connection pooler user is not present", testName) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 9e87f9fbb..33f9234b6 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2298,12 +2298,9 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, return nil, err } - var name string - name = c.connectionPoolerName(role) - deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: c.connectionPoolerName(role), Namespace: c.Namespace, Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, @@ -2336,24 +2333,22 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol if spec.ConnectionPooler == nil { spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - name := c.connectionPoolerName(role) serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{ { - Name: name, + Name: c.connectionPoolerName(role), Port: pgPort, TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, }, }, - Type: v1.ServiceTypeClusterIP, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{"connection-pooler": c.connectionPoolerName(role)}, } - serviceSpec.Selector = map[string]string{"connection-pooler": name} - service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: c.connectionPoolerName(role), Namespace: c.Namespace, Labels: 
c.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 84c0b14fe..fb0366bfe 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -126,7 +126,7 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } - if c.Spec.EnableMasterConnectionPooler != nil || c.ConnectionPooler != nil { + if c.Spec.EnableConnectionPooler != nil || c.ConnectionPooler != nil { deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Master) if err != nil { msg = "could not generate deployment for connection pooler: %v" @@ -197,7 +197,7 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo return c.ConnectionPooler, nil } -func (c *Cluster) deleteConnectionPooler() (err error) { +func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { c.setProcessName("deleting connection pooler") c.logger.Debugln("deleting connection pooler") @@ -210,7 +210,7 @@ func (c *Cluster) deleteConnectionPooler() (err error) { // Clean up the deployment object. 
If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate - deploymentName := c.connectionPoolerName(Master) + deploymentName := c.connectionPoolerName(role) deployment := c.ConnectionPooler.Deployment if deployment != nil { @@ -235,7 +235,7 @@ func (c *Cluster) deleteConnectionPooler() (err error) { // Repeat the same for the service object service := c.ConnectionPooler.Service - serviceName := c.connectionPoolerName(Master) + serviceName := c.connectionPoolerName(role) if service != nil { serviceName = service.Name diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index ca7683160..144d8a051 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -62,7 +62,7 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { t.Errorf("%s: Connection pooler service is empty", testName) } - err = cluster.deleteConnectionPooler() + err = cluster.deleteConnectionPooler(Master) if err != nil { t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) } @@ -97,7 +97,7 @@ func TestNeedConnectionPooler(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - EnableMasterConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(true), } if !cluster.needConnectionPooler() { @@ -106,8 +106,8 @@ func TestNeedConnectionPooler(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - EnableMasterConnectionPooler: boolToPointer(false), - ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, } if cluster.needConnectionPooler() { @@ -116,8 +116,8 @@ func TestNeedConnectionPooler(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - EnableMasterConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, } if !cluster.needConnectionPooler() { diff --git 
a/pkg/cluster/sync.go b/pkg/cluster/sync.go index fb064f53e..e2109bf93 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -895,8 +895,10 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources - if err = c.deleteConnectionPooler(); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + for _, role := range [2]PostgresRole{Master, Replica} { + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } } } @@ -905,9 +907,10 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if c.ConnectionPooler != nil && (c.ConnectionPooler.Deployment != nil || c.ConnectionPooler.Service != nil) { - - if err = c.deleteConnectionPooler(); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + for _, role := range [2]PostgresRole{Master, Replica} { + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } } } } diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index eaffbf475..d9248ae33 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -139,7 +139,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { }, newSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - EnableMasterConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(true), }, }, cluster: clusterMissingObjects, @@ -232,14 +232,14 @@ func TestConnectionPoolerSynchronization(t *testing.T) { subTest: "there is no sync from nil to an empty spec", oldSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - EnableMasterConnectionPooler: boolToPointer(true), - ConnectionPooler: nil, + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: nil, }, }, newSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - EnableMasterConnectionPooler: boolToPointer(true), - ConnectionPooler: 
&acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, }, }, cluster: clusterMock, diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 3f0dff0b1..6ffe9899b 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -520,8 +520,8 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { } func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - if spec.EnableMasterConnectionPooler != nil { - return *spec.EnableMasterConnectionPooler + if spec.EnableConnectionPooler != nil { + return *spec.EnableConnectionPooler } else if spec.EnableReplicaConnectionPooler != nil { return *spec.EnableReplicaConnectionPooler } else if spec.ConnectionPooler == nil { diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index 634c9f8d4..bf3cc05ea 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -607,16 +607,16 @@ def update_postgresql(namespace: str, cluster: str): spec['volume'] = {'size': size} - if 'enableMasterConnectionPooler' in postgresql['spec']: - cp = postgresql['spec']['enableMasterConnectionPooler'] + if 'enableConnectionPooler' in postgresql['spec']: + cp = postgresql['spec']['enableConnectionPooler'] if not cp: - if 'enableMasterConnectionPooler' in o['spec']: - del o['spec']['enableMasterConnectionPooler'] + if 'enableConnectionPooler' in o['spec']: + del o['spec']['enableConnectionPooler'] else: - spec['enableMasterConnectionPooler'] = True + spec['enableConnectionPooler'] = True else: - if 'enableMasterConnectionPooler' in o['spec']: - del o['spec']['enableMasterConnectionPooler'] + if 'enableConnectionPooler' in o['spec']: + del o['spec']['enableConnectionPooler'] if 'enableReplicaConnectionPooler' in postgresql['spec']: cp = postgresql['spec']['enableReplicaConnectionPooler'] From 0ce9b500c6790337dd8d33d5e9a1673abfda20e3 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Fri, 4 Sep 2020 08:02:27 +0200 Subject: [PATCH 11/40] Fix sync --- 
pkg/cluster/sync.go | 156 +++++++++++++++++--------------------------- 1 file changed, 60 insertions(+), 96 deletions(-) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index e2109bf93..7b027450e 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -887,10 +887,17 @@ func (c *Cluster) syncConnectionPooler(oldSpec, } } - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil { + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, Master); err != nil { c.logger.Errorf("could not sync connection pooler: %v", err) return reason, err } + if newSpec.Spec.EnableReplicaConnectionPooler != nil && + *newSpec.Spec.EnableReplicaConnectionPooler == true { + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, Replica); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + return reason, err + } + } } if oldNeedConnectionPooler && !newNeedConnectionPooler { @@ -922,21 +929,37 @@ func (c *Cluster) syncConnectionPooler(oldSpec, // synchronizing the corresponding deployment, but in case of deployment or // service is missing, create it. After checking, also remember an object for // the future references. -func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) ( +func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( SyncReason, error) { - masterdeployment, err := c.checkAndCreateConnectionPoolerDeployment(Master, newSpec) - if err != nil { - msg := "could not get connection pooler deployment to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } - replicadeployment, err := c.checkAndCreateConnectionPoolerDeployment(Replica, newSpec) - if err != nil { + deployment, err := c.KubeClient. + Deployments(c.Namespace). 
+ Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return NoSync, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + + c.ConnectionPooler.Deployment = deployment + } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler.Deployment = masterdeployment - c.ConnectionPooler.ReplDeployment = replicadeployment + c.ConnectionPooler.Deployment = deployment // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler @@ -955,28 +978,32 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql newConnectionPooler = &acidv1.ConnectionPooler{} } - c.logger.Infof("Old: %+v, New: %+v", oldConnectionPooler, newConnectionPooler) + c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, masterdeployment) + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) 
if specSync || defaultsSync { - newmasterdeployment, err := c.UpdateConnectionPoolerDeploymentSub(Master, reason[:], newSpec) + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { - return reason, err + msg := "could not generate deployment for connection pooler: %v" + return reason, fmt.Errorf(msg, err) } - c.ConnectionPooler.Deployment = newmasterdeployment - return reason, nil - } - defaultsSync, defaultsReason = c.needSyncConnectionPoolerDefaults(newConnectionPooler, replicadeployment) - reason = append(specReason, defaultsReason...) - if specSync || defaultsSync { - newreplicadeployment, err := c.UpdateConnectionPoolerDeploymentSub(Replica, reason[:], newSpec) + oldDeploymentSpec := c.ConnectionPooler.Deployment + + deployment, err := c.updateConnectionPoolerDeployment( + oldDeploymentSpec, + newDeploymentSpec) + if err != nil { return reason, err } - c.ConnectionPooler.Deployment = newreplicadeployment + + c.ConnectionPooler.Deployment = deployment return reason, nil } } @@ -985,76 +1012,6 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if newAnnotations != nil { c.updateConnectionPoolerAnnotations(newAnnotations) } - masterservice, err := c.checkAndCreateConnectionPoolerService(Master, newSpec) - if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } - replicaservice, err := c.checkAndCreateConnectionPoolerService(Replica, newSpec) - if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler.Service = masterservice - c.ConnectionPooler.ReplService = replicaservice - } - - return NoSync, nil -} - -func (c *Cluster) 
UpdateConnectionPoolerDeploymentSub(role PostgresRole, reason []string, newSpec *acidv1.Postgresql) (*appsv1.Deployment, error) { - - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), reason) - - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg := "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - oldDeploymentSpec := c.ConnectionPooler.Deployment - - deployment, err := c.updateConnectionPoolerDeployment( - oldDeploymentSpec, - newDeploymentSpec) - - if err != nil { - return nil, err - } - return deployment, nil -} - -func (c *Cluster) checkAndCreateConnectionPoolerDeployment(role PostgresRole, newSpec *acidv1.Postgresql) (*appsv1.Deployment, error) { - - deployment, err := c.KubeClient. - Deployments(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - return deployment, nil - } - return deployment, nil -} - -func (c *Cluster) checkAndCreateConnectionPoolerService(role PostgresRole, newSpec *acidv1.Postgresql) (*v1.Service, error) { service, err := c.KubeClient. Services(c.Namespace). 
@@ -1070,10 +1027,17 @@ func (c *Cluster) checkAndCreateConnectionPoolerService(role PostgresSp Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) if err != nil { - return nil, err + return NoSync, err } - return service, nil + c.ConnectionPooler.Service = service + } else if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler.Service = service } - return service, nil + + return NoSync, nil } From 6d4bb18c29922d2dbd8afc99f12646de3f2ac396 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Fri, 4 Sep 2020 17:26:54 +0200 Subject: [PATCH 12/40] Adding test cases and other changes - Refactor needConnectionPooler for master and replica separately - Improve sync function - Add test cases to create, delete and sync with replica connection pooler Other changes --- pkg/cluster/cluster.go | 6 +- pkg/cluster/resources.go | 22 ++++- pkg/cluster/resources_test.go | 72 ++++++++++++++- pkg/cluster/sync.go | 164 ++++++++++++++++++++++------------ pkg/cluster/sync_test.go | 80 ++++++++++++++++- pkg/cluster/util.go | 26 +++--- 6 files changed, 286 insertions(+), 84 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index dff96e47f..1016174b2 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -348,7 +348,7 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - if c.needConnectionPooler() { + if c.needMasterConnectionPooler() || c.needReplicaConnectionPooler() { if c.ConnectionPooler != nil { c.logger.Warning("Connection pooler already exists in the cluster") return nil @@ -650,7 +650,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // initUsers. Check if it needs to be called.
sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) && reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) - needConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec) + needConnectionPooler := c.needMasterConnectionPoolerWorker(&newSpec.Spec) if !sameUsers || needConnectionPooler { c.logger.Debugf("syncing secrets") if err := c.initUsers(); err != nil { @@ -915,7 +915,7 @@ func (c *Cluster) initSystemUsers() { // Connection pooler user is an exception, if requested it's going to be // created by operator as a normal pgUser - if c.needConnectionPooler() { + if c.needMasterConnectionPooler() || c.needReplicaConnectionPooler() { // initialize empty connection pooler if not done yet if c.Spec.ConnectionPooler == nil { c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{} diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index fb0366bfe..207ed4570 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -126,7 +126,8 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } - if c.Spec.EnableConnectionPooler != nil || c.ConnectionPooler != nil { + if c.needMasterConnectionPooler() { + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Master) if err != nil { msg = "could not generate deployment for connection pooler: %v" @@ -158,9 +159,11 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo } c.logger.Debugf("created new connection pooler %q, uid: %q", util.NameFromMeta(deployment.ObjectMeta), deployment.UID) + } - if c.Spec.EnableReplicaConnectionPooler != nil && *c.Spec.EnableReplicaConnectionPooler == true { + if c.needReplicaConnectionPooler() { + repldeploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Replica) if err != nil { msg = "could not generate deployment for connection pooler: %v" @@ 
-192,6 +195,7 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo } c.logger.Debugf("created new connection pooler for replica %q, uid: %q", util.NameFromMeta(repldeployment.ObjectMeta), repldeployment.UID) + } return c.ConnectionPooler, nil @@ -211,8 +215,13 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Clean up the deployment object. If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate deploymentName := c.connectionPoolerName(role) - deployment := c.ConnectionPooler.Deployment + var deployment *appsv1.Deployment + if role == Master { + deployment = c.ConnectionPooler.Deployment + } else { + deployment = c.ConnectionPooler.ReplDeployment + } if deployment != nil { deploymentName = deployment.Name } @@ -234,7 +243,12 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { c.logger.Infof("Connection pooler deployment %q has been deleted", deploymentName) // Repeat the same for the service object - service := c.ConnectionPooler.Service + var service *v1.Service + if role == Master { + service = c.ConnectionPooler.Service + } else { + service = c.ConnectionPooler.ReplService + } serviceName := c.connectionPoolerName(role) if service != nil { diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index 144d8a051..45d2f2fd6 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -66,6 +66,31 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { if err != nil { t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) } + + //Check if Replica connection pooler can be create and deleted successfully + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + replpoolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction) + + if err != nil { + t.Errorf("%s: Cannot 
create replica connection pooler, %s, %+v", + testName, err, replpoolerResources) + } + + if replpoolerResources.ReplDeployment == nil { + t.Errorf("%s: Connection pooler replica deployment is empty", testName) + } + + if replpoolerResources.ReplService == nil { + t.Errorf("%s: Connection pooler replica service is empty", testName) + } + + err = cluster.deleteConnectionPooler(Replica) + if err != nil { + t.Errorf("%s: Cannot delete replica connection pooler, %s", testName, err) + } } func TestNeedConnectionPooler(t *testing.T) { @@ -91,7 +116,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if !cluster.needConnectionPooler() { + if !cluster.needMasterConnectionPooler() { t.Errorf("%s: Connection pooler is not enabled with full definition", testName) } @@ -100,7 +125,7 @@ func TestNeedConnectionPooler(t *testing.T) { EnableConnectionPooler: boolToPointer(true), } - if !cluster.needConnectionPooler() { + if !cluster.needMasterConnectionPooler() { t.Errorf("%s: Connection pooler is not enabled with flag", testName) } @@ -110,7 +135,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if cluster.needConnectionPooler() { + if cluster.needMasterConnectionPooler() { t.Errorf("%s: Connection pooler is still enabled with flag being false", testName) } @@ -120,8 +145,47 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if !cluster.needConnectionPooler() { + if !cluster.needMasterConnectionPooler() { t.Errorf("%s: Connection pooler is not enabled with flag and full", testName) } + + // Test for replica connection pooler + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is not enabled with full definition", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: 
boolToPointer(true), + } + + if !cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is not enabled with flag and full", + testName) + } } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 7b027450e..92096d1e3 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -560,7 +560,7 @@ func (c *Cluster) syncRoles() (err error) { userNames = append(userNames, u.Name) } - if c.needConnectionPooler() { + if c.needMasterConnectionPooler() || c.needReplicaConnectionPooler() { connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName] userNames = append(userNames, connectionPoolerUser.Name) @@ -845,78 +845,107 @@ func (c *Cluster) syncConnectionPooler(oldSpec, var reason SyncReason var err error + var newNeedConnectionPooler, oldNeedConnectionPooler bool if c.ConnectionPooler == nil { c.ConnectionPooler = &ConnectionPoolerObjects{} } - newNeedConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler := c.needConnectionPoolerWorker(&oldSpec.Spec) + for _, role := range [2]PostgresRole{Master, Replica} { + if role == Master { + newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + } else { + newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = 
c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + } - if newNeedConnectionPooler { - // Try to sync in any case. If we didn't needed connection pooler before, - // it means we want to create it. If it was already present, still sync - // since it could happen that there is no difference in specs, and all - // the resources are remembered, but the deployment was manually deleted - // in between - c.logger.Debug("syncing connection pooler") + if newNeedConnectionPooler { + // Try to sync in any case. If we didn't needed connection pooler before, + // it means we want to create it. If it was already present, still sync + // since it could happen that there is no difference in specs, and all + // the resources are remembered, but the deployment was manually deleted + // in between + c.logger.Debug("syncing connection pooler") - // in this case also do not forget to install lookup function as for - // creating cluster - if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction { - newConnectionPooler := newSpec.Spec.ConnectionPooler + // in this case also do not forget to install lookup function as for + // creating cluster + if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction { + newConnectionPooler := newSpec.Spec.ConnectionPooler - specSchema := "" - specUser := "" + specSchema := "" + specUser := "" - if newConnectionPooler != nil { - specSchema = newConnectionPooler.Schema - specUser = newConnectionPooler.User - } + if newConnectionPooler != nil { + specSchema = newConnectionPooler.Schema + specUser = newConnectionPooler.User + } - schema := util.Coalesce( - specSchema, - c.OpConfig.ConnectionPooler.Schema) + schema := util.Coalesce( + specSchema, + c.OpConfig.ConnectionPooler.Schema) - user := util.Coalesce( - specUser, - c.OpConfig.ConnectionPooler.User) + user := util.Coalesce( + specUser, + c.OpConfig.ConnectionPooler.User) - if err = lookup(schema, user); err != nil { - return NoSync, err + if err = lookup(schema, user); err != nil { + 
return NoSync, err + } } - } - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, Master); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - return reason, err - } - if newSpec.Spec.EnableReplicaConnectionPooler != nil && - *newSpec.Spec.EnableReplicaConnectionPooler == true { - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, Replica); err != nil { + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { c.logger.Errorf("could not sync connection pooler: %v", err) return reason, err } } - } - if oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources - for _, role := range [2]PostgresRole{Master, Replica} { - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources + if role == Master { + + if c.ConnectionPooler != nil && + (c.ConnectionPooler.Deployment != nil || + c.ConnectionPooler.Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + + } else { + + if c.ConnectionPooler != nil && + (c.ConnectionPooler.ReplDeployment != nil || + c.ConnectionPooler.ReplService != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } } } - } - if !oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources if not empty - if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment != nil || - c.ConnectionPooler.Service != nil) { - for _, role := range [2]PostgresRole{Master, Replica} { - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if !oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources 
if not empty + if role == Master { + if c.ConnectionPooler != nil && + (c.ConnectionPooler.Deployment != nil || + c.ConnectionPooler.Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + + } + } else { + if c.ConnectionPooler != nil && + (c.ConnectionPooler.ReplDeployment != nil || + c.ConnectionPooler.ReplService != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } } } } @@ -954,13 +983,20 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return NoSync, err } - c.ConnectionPooler.Deployment = deployment + if role == Master { + c.ConnectionPooler.Deployment = deployment + } else { + c.ConnectionPooler.ReplDeployment = deployment + } } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler.Deployment = deployment - + if role == Master { + c.ConnectionPooler.Deployment = deployment + } else { + c.ConnectionPooler.ReplDeployment = deployment + } // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler newConnectionPooler := newSpec.Spec.ConnectionPooler @@ -983,6 +1019,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) 
+ c.logger.Warningf("role and reason %v, %v", role, reason) if specSync || defaultsSync { c.logger.Infof("Update connection pooler deployment %s, reason: %+v", c.connectionPoolerName(role), reason) @@ -1002,8 +1039,11 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return reason, err } - - c.ConnectionPooler.Deployment = deployment + if role == Master { + c.ConnectionPooler.Deployment = deployment + } else { + c.ConnectionPooler.ReplDeployment = deployment + } return reason, nil } } @@ -1029,14 +1069,22 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return NoSync, err } + if role == Master { + c.ConnectionPooler.Service = service + } else { + c.ConnectionPooler.ReplService = service + } - c.ConnectionPooler.Service = service } else if err != nil { msg := "could not get connection pooler service to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler.Service = service + if role == Master { + c.ConnectionPooler.Service = service + } else { + c.ConnectionPooler.ReplService = service + } } return NoSync, nil diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index d9248ae33..af8e928c5 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -32,11 +32,11 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { return fmt.Errorf("Connection pooler resources are empty") } - if cluster.ConnectionPooler.Deployment == nil { + if cluster.ConnectionPooler.Deployment == nil && cluster.ConnectionPooler.ReplDeployment == nil { return fmt.Errorf("Deployment was not saved") } - if cluster.ConnectionPooler.Service == nil { + if cluster.ConnectionPooler.Service == nil && cluster.ConnectionPooler.ReplService == nil { return fmt.Errorf("Service was not saved") } @@ -51,6 +51,24 @@ func objectsAreDeleted(cluster *Cluster, err error, 
reason SyncReason) error { return nil } +func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler != nil && + (cluster.ConnectionPooler.Deployment != nil && cluster.ConnectionPooler.Service != nil) { + return fmt.Errorf("Connection pooler master was not deleted") + } + + return nil +} + +func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler != nil && + (cluster.ConnectionPooler.ReplDeployment != nil && cluster.ConnectionPooler.ReplService != nil) { + return fmt.Errorf("Connection pooler replica was not deleted") + } + + return nil +} + func noEmptySync(cluster *Cluster, err error, reason SyncReason) error { for _, msg := range reason { if strings.HasPrefix(msg, "update [] from '' to '") { @@ -102,6 +120,12 @@ func TestConnectionPoolerSynchronization(t *testing.T) { Deployment: &appsv1.Deployment{}, Service: &v1.Service{}, } + clusterReplicaDirtyMock := newCluster() + clusterReplicaDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() + clusterReplicaDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{ + ReplDeployment: &appsv1.Deployment{}, + ReplService: &v1.Service{}, + } clusterNewDefaultsMock := newCluster() clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() @@ -147,6 +171,21 @@ func TestConnectionPoolerSynchronization(t *testing.T) { defaultInstances: 1, check: objectsAreSaved, }, + { + subTest: "create replica if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: clusterReplicaDirtyMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreSaved, + }, { subTest: "create from scratch", oldSpec: &acidv1.Postgresql{ @@ -177,6 +216,43 @@ func TestConnectionPoolerSynchronization(t *testing.T) { defaultInstances: 1, check: objectsAreDeleted, 
}, + { + subTest: "delete only master if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(false), + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyMasterDeleted, + }, + { + subTest: "delete only replica if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyReplicaDeleted, + }, { subTest: "cleanup if still there", oldSpec: &acidv1.Postgresql{ diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 6ffe9899b..332874fe1 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -519,21 +519,21 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { return c.OpConfig.KubernetesUseConfigMaps } -func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - if spec.EnableConnectionPooler != nil { - return *spec.EnableConnectionPooler - } else if spec.EnableReplicaConnectionPooler != nil { - return *spec.EnableReplicaConnectionPooler - } else if spec.ConnectionPooler == nil { - return spec.ConnectionPooler != nil - } - // if the connectionPooler section is there, then we enable even though the - // flags are not there - return true +// isConnectionPoolerEnabled +func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && 
spec.EnableConnectionPooler == nil) +} + +func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return nil != spec.EnableReplicaConnectionPooler && *spec.EnableReplicaConnectionPooler +} + +func (c *Cluster) needMasterConnectionPooler() bool { + return c.needMasterConnectionPoolerWorker(&c.Spec) } -func (c *Cluster) needConnectionPooler() bool { - return c.needConnectionPoolerWorker(&c.Spec) +func (c *Cluster) needReplicaConnectionPooler() bool { + return c.needReplicaConnectionPoolerWorker(&c.Spec) } // Earlier arguments take priority From 6fda51c713f23d10cd9d085e9172aa6f96b90e71 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 8 Sep 2020 17:32:47 +0200 Subject: [PATCH 13/40] Improvements in tests - Fixed the issue with failing test cases - Add more test cases for replica connection pooler - Added docs about the new flag --- docs/reference/cluster_manifest.md | 9 +++- docs/user.md | 8 +++- pkg/cluster/k8sres_test.go | 77 +++++++++++++++++------------- pkg/cluster/sync.go | 36 +++++--------- pkg/cluster/sync_test.go | 2 - 5 files changed, 69 insertions(+), 63 deletions(-) diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index 70ab14855..bb28d9d89 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -151,10 +151,15 @@ These parameters are grouped directly under the `spec` key in the manifest. configured (so you can override the operator configuration). Optional. * **enableConnectionPooler** - Tells the operator to create a connection pooler with a database. If this - field is true, a connection pooler deployment will be created even if + Tells the operator to create a connection pooler with a database for the master + service. If this field is true, a connection pooler deployment will be created even if `connectionPooler` section is empty. Optional, not set by default. 
+* **enableReplicaConnectionPooler** + Tells the operator to create a connection pooler with a database for the replica + service. If this field is true, a connection pooler deployment for replica + will be created even if `connectionPooler` section is empty. Optional, not set by default. + * **enableLogicalBackup** Determines if the logical backup of this cluster should be taken and uploaded to S3. Default: false. Optional. diff --git a/docs/user.md b/docs/user.md index a4b1424b8..7a5e207e6 100644 --- a/docs/user.md +++ b/docs/user.md @@ -737,11 +737,17 @@ manifest: ```yaml spec: enableConnectionPooler: true + enableReplicaConnectionPooler: true ``` This will tell the operator to create a connection pooler with default configuration, through which one can access the master via a separate service -`{cluster-name}-pooler`. In most of the cases the +`{cluster-name}-pooler`. With the first option, connection pooler for master service +is created and with the second option, connection pooler for replica is created. +Note that both of these flags are independent of each other and user can set or +unset any of them as per their requirements without any effect on the other. + +In most of the cases the [default configuration](reference/operator_parameters.md#connection-pooler-configuration) should be good enough. 
To configure a new connection pooler individually for each Postgres cluster, specify: diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 58f811adc..da3a56d24 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -929,7 +929,7 @@ func TestPodEnvironmentSecretVariables(t *testing.T) { } -func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { +func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest { return fmt.Errorf("CPU request doesn't match, got %s, expected %s", @@ -957,18 +957,18 @@ func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil } -func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { +func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] - if poolerLabels != cluster.connectionPoolerLabelsSelector(Master).MatchLabels["connection-pooler"] { + if poolerLabels != cluster.connectionPoolerLabelsSelector(role).MatchLabels["connection-pooler"] { return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", - podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(Master).MatchLabels) + podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(role).MatchLabels) } return nil } -func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { +func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { required := map[string]bool{ "PGHOST": false, "PGPORT": false, @@ -1034,14 +1034,14 @@ func TestConnectionPoolerPodSpec(t *testing.T) { }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil } + noCheck := 
func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } tests := []struct { subTest string spec *acidv1.PostgresSpec expected error cluster *Cluster - check func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error + check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error }{ { subTest: "default configuration", @@ -1073,7 +1073,8 @@ func TestConnectionPoolerPodSpec(t *testing.T) { { subTest: "labels for service", spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), }, expected: nil, cluster: cluster, @@ -1089,20 +1090,23 @@ func TestConnectionPoolerPodSpec(t *testing.T) { check: testEnvs, }, } - for _, tt := range tests { - podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec, Master) + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec, role) - if err != tt.expected && err.Error() != tt.expected.Error() { - t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", - testName, tt.subTest, err, tt.expected) - } + if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", + testName, tt.subTest, err, tt.expected) + } - err = tt.check(cluster, podSpec) - if err != nil { - t.Errorf("%s [%s]: Pod spec is incorrect, %+v", - testName, tt.subTest, err) + err = tt.check(cluster, podSpec, role) + if err != nil { + t.Errorf("%s [%s]: Pod spec is incorrect, %+v", + testName, tt.subTest, err) + } } } + } func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployment) error { @@ -1166,7 +1170,8 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { { subTest: "default configuration", spec: &acidv1.PostgresSpec{ - ConnectionPooler: 
&acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), }, expected: nil, cluster: cluster, @@ -1175,7 +1180,8 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { { subTest: "owner reference", spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), }, expected: nil, cluster: cluster, @@ -1184,7 +1190,8 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { { subTest: "selector", spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), }, expected: nil, cluster: cluster, @@ -1205,9 +1212,10 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { testName, tt.subTest, err) } } + } -func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error { +func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { owner := service.ObjectMeta.OwnerReferences[0] if owner.Name != cluster.Statefulset.ObjectMeta.Name { @@ -1218,12 +1226,12 @@ func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error { return nil } -func testServiceSelector(cluster *Cluster, service *v1.Service) error { +func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error { selector := service.Spec.Selector - if selector["connection-pooler"] != cluster.connectionPoolerName(Master) { + if selector["connection-pooler"] != cluster.connectionPoolerName(role) { return fmt.Errorf("Selector is incorrect, got %s, expected %s", - selector["connection-pooler"], cluster.connectionPoolerName(Master)) + selector["connection-pooler"], cluster.connectionPoolerName(role)) } return nil @@ -1253,7 +1261,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { }, } - noCheck := func(cluster *Cluster, 
deployment *v1.Service) error { + noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error { return nil } @@ -1261,7 +1269,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { subTest string spec *acidv1.PostgresSpec cluster *Cluster - check func(cluster *Cluster, deployment *v1.Service) error + check func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error }{ { subTest: "default configuration", @@ -1282,18 +1290,21 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { { subTest: "selector", spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), }, cluster: cluster, check: testServiceSelector, }, } - for _, tt := range tests { - service := tt.cluster.generateConnectionPoolerService(tt.spec, Master) + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + service := tt.cluster.generateConnectionPoolerService(tt.spec, role) - if err := tt.check(cluster, service); err != nil { - t.Errorf("%s [%s]: Service spec is incorrect, %+v", - testName, tt.subTest, err) + if err := tt.check(cluster, service, role); err != nil { + t.Errorf("%s [%s]: Service spec is incorrect, %+v", + testName, tt.subTest, err) + } } } } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 92096d1e3..82eb29f3b 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -847,10 +847,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, var err error var newNeedConnectionPooler, oldNeedConnectionPooler bool - if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} - } - + // Check and perform the sync requirements for each of the roles. 
for _, role := range [2]PostgresRole{Master, Replica} { if role == Master { newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) @@ -859,6 +856,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) } + if c.ConnectionPooler == nil { + c.ConnectionPooler = &ConnectionPoolerObjects{} + } if newNeedConnectionPooler { // Try to sync in any case. If we didn't needed connection pooler before, @@ -902,27 +902,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources - if role == Master { - - if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment != nil || - c.ConnectionPooler.Service != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } - - } else { - - if c.ConnectionPooler != nil && - (c.ConnectionPooler.ReplDeployment != nil || - c.ConnectionPooler.ReplService != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) } } @@ -937,6 +918,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, c.logger.Warningf("could not remove connection pooler: %v", err) } + } else if c.ConnectionPooler.ReplDeployment == nil && c.ConnectionPooler.ReplService == nil { + c.ConnectionPooler = nil } } else { if c.ConnectionPooler != nil && @@ -946,6 +929,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } + } else if c.ConnectionPooler.Deployment == nil && c.ConnectionPooler.Service == nil { + c.ConnectionPooler = 
nil } } } @@ -995,6 +980,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if role == Master { c.ConnectionPooler.Deployment = deployment } else { + c.ConnectionPooler.ReplDeployment = deployment } // actual synchronization diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index af8e928c5..1a9132443 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -225,9 +225,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { }, newSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, EnableReplicaConnectionPooler: boolToPointer(true), - EnableConnectionPooler: boolToPointer(false), }, }, cluster: clusterMock, From a03397a8e985bfd2f4361a8caa39f0a2330dbbf1 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Fri, 11 Sep 2020 10:17:43 +0200 Subject: [PATCH 14/40] Resolve review comments --- pkg/cluster/resources.go | 15 ++++++++++----- pkg/cluster/sync.go | 4 +--- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 207ed4570..44ee4b674 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -189,13 +189,18 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo if err != nil { return nil, err } - c.ConnectionPooler = &ConnectionPoolerObjects{ - ReplDeployment: repldeployment, - ReplService: replService, + + if c.needMasterConnectionPooler() { + c.ConnectionPooler.ReplDeployment = repldeployment + c.ConnectionPooler.ReplService = replService + } else { + c.ConnectionPooler = &ConnectionPoolerObjects{ + ReplDeployment: repldeployment, + ReplService: replService, + } } c.logger.Debugf("created new connection pooler for replica %q, uid: %q", util.NameFromMeta(repldeployment.ObjectMeta), repldeployment.UID) - } return c.ConnectionPooler, nil @@ -214,7 +219,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Clean up the 
deployment object. If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate - deploymentName := c.connectionPoolerName(role) + var deploymentName string var deployment *appsv1.Deployment if role == Master { diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 82eb29f3b..1ad7a3e91 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -1005,10 +1005,8 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) - c.logger.Warningf("role and reason %v, %v", role, reason) + if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), reason) newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { From c692ca279e94d02a833909e8be99c7680866e503 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Fri, 11 Sep 2020 10:22:29 +0200 Subject: [PATCH 15/40] Minor fix --- pkg/cluster/k8sres.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 33f9234b6..ecbe03212 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2342,8 +2342,10 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, }, }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{"connection-pooler": c.connectionPoolerName(role)}, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(role), + }, } service := &v1.Service{ From 175f0c55ce5f84e21aad6ac6b1a919ba2b5090d1 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 21 Sep 2020 
16:40:40 +0200 Subject: [PATCH 16/40] Refactor needConnectionPooler Have one unified function to tell if any connection pooler is required Add a helper function to list the roles that require connection pooler, helps in avoiding duplication of code --- docs/reference/cluster_manifest.md | 10 ++- pkg/cluster/cluster.go | 11 ++- pkg/cluster/resources.go | 126 +++++++++-------------------- pkg/cluster/sync.go | 74 +++++------------ pkg/cluster/util.go | 23 +++++- 5 files changed, 92 insertions(+), 152 deletions(-) diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index bb28d9d89..f7ddb6ff1 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -246,10 +246,10 @@ explanation of `ttl` and `loop_wait` parameters. * **synchronous_mode** Patroni `synchronous_mode` parameter value. The default is set to `false`. Optional. - + * **synchronous_mode_strict** Patroni `synchronous_mode_strict` parameter value. Can be used in addition to `synchronous_mode`. The default is set to `false`. Optional. - + ## Postgres container resources Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) @@ -402,8 +402,10 @@ CPU and memory limits for the sidecar container. Parameters are grouped under the `connectionPooler` top-level key and specify configuration for connection pooler. If this section is not empty, a connection -pooler will be created for a database even if `enableConnectionPooler` is not -present. +pooler will be created for master service only even if `enableConnectionPooler` +is not present. But if this section is present then it defines the configuration +for both master and replica pooler services (if `enableReplicaConnectionPooler` + is enabled). * **numberOfInstances** How many instances of connection pooler to create. 
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1016174b2..27461d1aa 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -55,10 +55,8 @@ type Config struct { // K8S objects that are belongs to a connection pooler type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - ReplDeployment *appsv1.Deployment - Service *v1.Service - ReplService *v1.Service + Deployment map[PostgresRole]*appsv1.Deployment + Service map[PostgresRole]*v1.Service // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. @@ -348,7 +346,8 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - if c.needMasterConnectionPooler() || c.needReplicaConnectionPooler() { + roles := c.RolesConnectionPooler() + for _, r := range roles { if c.ConnectionPooler != nil { c.logger.Warning("Connection pooler already exists in the cluster") return nil @@ -359,7 +358,7 @@ func (c *Cluster) Create() error { return nil } c.logger.Infof("connection pooler %q has been successfully created", - util.NameFromMeta(connectionPooler.Deployment.ObjectMeta)) + util.NameFromMeta(connectionPooler.Deployment[r].ObjectMeta)) } return nil diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 44ee4b674..8b7b63e31 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -126,81 +126,40 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } - if c.needMasterConnectionPooler() { - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Master) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when 
the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - serviceSpec := c.generateConnectionPoolerService(&c.Spec, Master) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). - Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - c.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: deployment, - Service: service, - } - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - - } - - if c.needReplicaConnectionPooler() { - - repldeploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Replica) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - repldeployment, err := c.KubeClient. - Deployments(repldeploymentSpec.Namespace). - Create(context.TODO(), repldeploymentSpec, metav1.CreateOptions{}) + if c.needConnectionPooler() { + roles := c.RolesConnectionPooler() + for _, r := range roles { + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, r) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } - if err != nil { - return nil, err - } + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + deployment, err := c.KubeClient. 
+ Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - replServiceSpec := c.generateConnectionPoolerService(&c.Spec, Replica) - replService, err := c.KubeClient. - Services(replServiceSpec.Namespace). - Create(context.TODO(), replServiceSpec, metav1.CreateOptions{}) + if err != nil { + return nil, err + } - if err != nil { - return nil, err - } + serviceSpec := c.generateConnectionPoolerService(&c.Spec, r) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). + Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - if c.needMasterConnectionPooler() { - c.ConnectionPooler.ReplDeployment = repldeployment - c.ConnectionPooler.ReplService = replService - } else { - c.ConnectionPooler = &ConnectionPoolerObjects{ - ReplDeployment: repldeployment, - ReplService: replService, + if err != nil { + return nil, err } + c.ConnectionPooler.Deployment[r] = deployment + c.ConnectionPooler.Service[r] = service + + c.logger.Debugf("created new connection pooler %q, uid: %q", + util.NameFromMeta(deployment.ObjectMeta), deployment.UID) } - c.logger.Debugf("created new connection pooler for replica %q, uid: %q", - util.NameFromMeta(repldeployment.ObjectMeta), repldeployment.UID) } return c.ConnectionPooler, nil @@ -221,12 +180,8 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // is somehow empty, try to delete based on what would we generate var deploymentName string var deployment *appsv1.Deployment + deployment = c.ConnectionPooler.Deployment[role] - if role == Master { - deployment = c.ConnectionPooler.Deployment - } else { - deployment = c.ConnectionPooler.ReplDeployment - } if deployment != nil { deploymentName = deployment.Name } @@ -249,11 +204,8 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Repeat the same for the service object var service *v1.Service - if role == Master { - service = c.ConnectionPooler.Service - } else { - service = 
c.ConnectionPooler.ReplService - } + service = c.ConnectionPooler.Service[role] + serviceName := c.connectionPoolerName(role) if service != nil { @@ -909,9 +861,9 @@ func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. -func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { +func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment == nil { + if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment[role] == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } @@ -924,9 +876,9 @@ func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeploym // worker at one time will try to update it chances of conflicts are // minimal. deployment, err := c.KubeClient. 
- Deployments(c.ConnectionPooler.Deployment.Namespace).Patch( + Deployments(c.ConnectionPooler.Deployment[role].Namespace).Patch( context.TODO(), - c.ConnectionPooler.Deployment.Name, + c.ConnectionPooler.Deployment[role].Name, types.MergePatchType, patchData, metav1.PatchOptions{}, @@ -935,21 +887,21 @@ func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeploym return nil, fmt.Errorf("could not patch deployment: %v", err) } - c.ConnectionPooler.Deployment = deployment + c.ConnectionPooler.Deployment[role] = deployment return deployment, nil } //updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string) (*appsv1.Deployment, error) { +func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { c.logger.Debugf("updating connection pooler annotations") patchData, err := metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) } - result, err := c.KubeClient.Deployments(c.ConnectionPooler.Deployment.Namespace).Patch( + result, err := c.KubeClient.Deployments(c.ConnectionPooler.Deployment[role].Namespace).Patch( context.TODO(), - c.ConnectionPooler.Deployment.Name, + c.ConnectionPooler.Deployment[role].Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}, diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 1ad7a3e91..9863ea3d7 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -848,7 +848,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, var newNeedConnectionPooler, oldNeedConnectionPooler bool // Check and perform the sync requirements for each of the roles. 
- for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range c.RolesConnectionPooler() { if role == Master { newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) @@ -858,6 +858,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, } if c.ConnectionPooler == nil { c.ConnectionPooler = &ConnectionPoolerObjects{} + c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) + c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) } if newNeedConnectionPooler { @@ -909,28 +911,12 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if !oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources if not empty - if role == Master { - if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment != nil || - c.ConnectionPooler.Service != nil) { + if c.ConnectionPooler != nil && + (c.ConnectionPooler.Deployment[role] != nil || + c.ConnectionPooler.Service[role] != nil) { - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - - } else if c.ConnectionPooler.ReplDeployment == nil && c.ConnectionPooler.ReplService == nil { - c.ConnectionPooler = nil - } - } else { - if c.ConnectionPooler != nil && - (c.ConnectionPooler.ReplDeployment != nil || - c.ConnectionPooler.ReplService != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } else if c.ConnectionPooler.Deployment == nil && c.ConnectionPooler.Service == nil { - c.ConnectionPooler = nil + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) } } } @@ -967,22 +953,13 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return NoSync, err } - - if role == Master { - 
c.ConnectionPooler.Deployment = deployment - } else { - c.ConnectionPooler.ReplDeployment = deployment - } + c.ConnectionPooler.Deployment[role] = deployment } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - if role == Master { - c.ConnectionPooler.Deployment = deployment - } else { + c.ConnectionPooler.Deployment[role] = deployment - c.ConnectionPooler.ReplDeployment = deployment - } // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler newConnectionPooler := newSpec.Spec.ConnectionPooler @@ -1007,34 +984,33 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql reason := append(specReason, defaultsReason...) if specSync || defaultsSync { - + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { msg := "could not generate deployment for connection pooler: %v" return reason, fmt.Errorf(msg, err) } - oldDeploymentSpec := c.ConnectionPooler.Deployment + oldDeploymentSpec := c.ConnectionPooler.Deployment[role] deployment, err := c.updateConnectionPoolerDeployment( oldDeploymentSpec, - newDeploymentSpec) + newDeploymentSpec, + role) if err != nil { return reason, err } - if role == Master { - c.ConnectionPooler.Deployment = deployment - } else { - c.ConnectionPooler.ReplDeployment = deployment - } + c.ConnectionPooler.Deployment[role] = deployment + return reason, nil } } - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler.Deployment.Annotations) + newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler.Deployment[role].Annotations) if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations) + c.updateConnectionPoolerAnnotations(newAnnotations, role) } service, err := c.KubeClient. 
@@ -1053,22 +1029,14 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return NoSync, err } - if role == Master { - c.ConnectionPooler.Service = service - } else { - c.ConnectionPooler.ReplService = service - } + c.ConnectionPooler.Service[role] = service } else if err != nil { msg := "could not get connection pooler service to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { // Service updates are not supported and probably not that useful anyway - if role == Master { - c.ConnectionPooler.Service = service - } else { - c.ConnectionPooler.ReplService = service - } + c.ConnectionPooler.Service[role] = service } return NoSync, nil diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 332874fe1..da82f36af 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -521,17 +521,36 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { // isConnectionPoolerEnabled func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) + return (spec.EnableConnectionPooler != nil && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) } func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return nil != spec.EnableReplicaConnectionPooler && *spec.EnableReplicaConnectionPooler + return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler } func (c *Cluster) needMasterConnectionPooler() bool { return c.needMasterConnectionPoolerWorker(&c.Spec) } +func (c *Cluster) needConnectionPooler() bool { + return c.needMasterConnectionPoolerWorker(&c.Spec) || c.needReplicaConnectionPoolerWorker(&c.Spec) +} + +// RolesConnectionPooler gives the list of roles which need connection pooler +func (c *Cluster) RolesConnectionPooler() []PostgresRole { 
+ roles := []PostgresRole{} + i := 0 + + if c.needMasterConnectionPoolerWorker(&c.Spec) { + roles[i] = Master + i = i + 1 + } + if c.needMasterConnectionPoolerWorker(&c.Spec) { + roles[i] = Replica + } + return roles +} + func (c *Cluster) needReplicaConnectionPooler() bool { return c.needReplicaConnectionPoolerWorker(&c.Spec) } From fea359f3d58bc593f8bee6be2f45c6d483e9b7c7 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 21 Sep 2020 22:55:10 +0200 Subject: [PATCH 17/40] Update tests --- pkg/cluster/resources.go | 2 ++ pkg/cluster/resources_test.go | 49 ++++++++---------------------- pkg/cluster/sync.go | 23 +++++++++++++- pkg/cluster/sync_test.go | 56 ++++++++++++++++++++++------------- pkg/cluster/util.go | 10 +++---- 5 files changed, 76 insertions(+), 64 deletions(-) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 8b7b63e31..bb4c8a7b4 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -107,6 +107,8 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo if c.ConnectionPooler == nil { c.ConnectionPooler = &ConnectionPoolerObjects{} + c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) + c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) } schema := c.Spec.ConnectionPooler.Schema diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index 45d2f2fd6..60755b909 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -54,42 +54,19 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { testName, err, poolerResources) } - if poolerResources.Deployment == nil { - t.Errorf("%s: Connection pooler deployment is empty", testName) - } - - if poolerResources.Service == nil { - t.Errorf("%s: Connection pooler service is empty", testName) - } - - err = cluster.deleteConnectionPooler(Master) - if err != nil { - t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) - } - - //Check if Replica connection 
pooler can be create and deleted successfully - cluster.Spec = acidv1.PostgresSpec{ - EnableReplicaConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - replpoolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction) - - if err != nil { - t.Errorf("%s: Cannot create replica connection pooler, %s, %+v", - testName, err, replpoolerResources) - } - - if replpoolerResources.ReplDeployment == nil { - t.Errorf("%s: Connection pooler replica deployment is empty", testName) - } - - if replpoolerResources.ReplService == nil { - t.Errorf("%s: Connection pooler replica service is empty", testName) - } - - err = cluster.deleteConnectionPooler(Replica) - if err != nil { - t.Errorf("%s: Cannot delete replica connection pooler, %s", testName, err) + for _, role := range cluster.RolesConnectionPooler() { + if poolerResources.Deployment[role] == nil { + t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) + } + + if poolerResources.Service[role] == nil { + t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) + } + + err = cluster.deleteConnectionPooler(role) + if err != nil { + t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) + } } } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 9863ea3d7..4e2805a3f 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -848,7 +848,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, var newNeedConnectionPooler, oldNeedConnectionPooler bool // Check and perform the sync requirements for each of the roles. 
- for _, role := range c.RolesConnectionPooler() { + for _, role := range [2]PostgresRole{Master, Replica} { if role == Master { newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) @@ -904,13 +904,32 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources + otherRole := role + if len(c.RolesConnectionPooler()) == 2 { + if role == Master { + otherRole = Replica + } else { + otherRole = Master + } + } if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } + if c.ConnectionPooler != nil && c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { + c.ConnectionPooler = nil + } } if !oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources if not empty + otherRole := role + if len(c.RolesConnectionPooler()) == 2 { + if role == Master { + otherRole = Replica + } else { + otherRole = Master + } + } if c.ConnectionPooler != nil && (c.ConnectionPooler.Deployment[role] != nil || c.ConnectionPooler.Service[role] != nil) { @@ -918,6 +937,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } + } else if c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { + c.ConnectionPooler = nil } } } diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index 1a9132443..64aa03678 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -19,11 +19,13 @@ func int32ToPointer(value int32) *int32 { } func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler.Deployment.Spec.Replicas == nil || - *cluster.ConnectionPooler.Deployment.Spec.Replicas 
!= 2 { - return fmt.Errorf("Wrong nubmer of instances") + for _, role := range cluster.RolesConnectionPooler() { + if cluster.ConnectionPooler.Deployment[role] != nil && + (cluster.ConnectionPooler.Deployment[role].Spec.Replicas == nil || + *cluster.ConnectionPooler.Deployment[role].Spec.Replicas != 2) { + return fmt.Errorf("Wrong number of instances") + } } - return nil } @@ -32,12 +34,16 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { return fmt.Errorf("Connection pooler resources are empty") } - if cluster.ConnectionPooler.Deployment == nil && cluster.ConnectionPooler.ReplDeployment == nil { - return fmt.Errorf("Deployment was not saved") - } + for _, role := range cluster.RolesConnectionPooler() { + if role != "" { + if cluster.ConnectionPooler.Deployment[role] == nil { + return fmt.Errorf("Deployment was not saved %s", role) + } - if cluster.ConnectionPooler.Service == nil && cluster.ConnectionPooler.ReplService == nil { - return fmt.Errorf("Service was not saved") + if cluster.ConnectionPooler.Service[role] == nil { + return fmt.Errorf("Service was not saved %s", role) + } + } } return nil @@ -52,20 +58,24 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { } func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler != nil && - (cluster.ConnectionPooler.Deployment != nil && cluster.ConnectionPooler.Service != nil) { - return fmt.Errorf("Connection pooler master was not deleted") - } + for _, role := range cluster.RolesConnectionPooler() { + if cluster.ConnectionPooler != nil && + (cluster.ConnectionPooler.Deployment[role] != nil && cluster.ConnectionPooler.Service[role] != nil) { + return fmt.Errorf("Connection pooler master was not deleted") + } + } return nil } func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler != nil && - (cluster.ConnectionPooler.ReplDeployment != nil && 
cluster.ConnectionPooler.ReplService != nil) { - return fmt.Errorf("Connection pooler replica was not deleted") - } + for _, role := range cluster.RolesConnectionPooler() { + if cluster.ConnectionPooler != nil && + (cluster.ConnectionPooler.Deployment[role] != nil && cluster.ConnectionPooler.Service[role] != nil) { + return fmt.Errorf("Connection pooler replica was not deleted") + } + } return nil } @@ -117,16 +127,20 @@ func TestConnectionPoolerSynchronization(t *testing.T) { clusterDirtyMock := newCluster() clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: &appsv1.Deployment{}, - Service: &v1.Service{}, + Deployment: make(map[PostgresRole]*appsv1.Deployment), + Service: make(map[PostgresRole]*v1.Service), } + clusterDirtyMock.ConnectionPooler.Deployment[Master] = &appsv1.Deployment{} + clusterDirtyMock.ConnectionPooler.Service[Master] = &v1.Service{} clusterReplicaDirtyMock := newCluster() clusterReplicaDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() clusterReplicaDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{ - ReplDeployment: &appsv1.Deployment{}, - ReplService: &v1.Service{}, + Deployment: make(map[PostgresRole]*appsv1.Deployment), + Service: make(map[PostgresRole]*v1.Service), } + clusterDirtyMock.ConnectionPooler.Deployment[Replica] = &appsv1.Deployment{} + clusterDirtyMock.ConnectionPooler.Service[Replica] = &v1.Service{} clusterNewDefaultsMock := newCluster() clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index da82f36af..b9ce6dbe9 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -521,7 +521,7 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { // isConnectionPoolerEnabled func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (spec.EnableConnectionPooler != nil && *spec.EnableConnectionPooler) || 
(spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
+	return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
 }
 
 func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
@@ -538,15 +538,13 @@ func (c *Cluster) needConnectionPooler() bool {
 
 // RolesConnectionPooler gives the list of roles which need connection pooler
 func (c *Cluster) RolesConnectionPooler() []PostgresRole {
-	roles := []PostgresRole{}
-	i := 0
+	roles := make([]PostgresRole, 0, 2)
 
 	if c.needMasterConnectionPoolerWorker(&c.Spec) {
-		roles[i] = Master
-		i = i + 1
+		roles = append(roles, Master)
 	}
-	if c.needMasterConnectionPoolerWorker(&c.Spec) {
-		roles[i] = Replica
+	if c.needReplicaConnectionPoolerWorker(&c.Spec) {
+		roles = append(roles, Replica)
 	}
 	return roles
 }

From 6272377da5555ce9ca0c152154ed815eb7e10d09 Mon Sep 17 00:00:00 2001
From: Rafia Sabih
Date: Wed, 23 Sep 2020 11:19:01 +0200
Subject: [PATCH 18/40] Add sync test

---
 pkg/cluster/cluster.go | 2 +-
 pkg/cluster/sync_test.go | 20 +++++++++++++++++++-
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 27461d1aa..adf6e30e8 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -914,7 +914,7 @@ func (c *Cluster) initSystemUsers() {
 
 	// Connection pooler user is an exception, if requested it's going to be
 	// created by operator as a normal pgUser
-	if c.needMasterConnectionPooler() || c.needReplicaConnectionPooler() {
+	if c.needConnectionPooler() {
 		// initialize empty connection pooler if not done yet
 		if c.Spec.ConnectionPooler == nil {
 			c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go
index 64aa03678..fa93cf45a 100644
--- a/pkg/cluster/sync_test.go
+++ b/pkg/cluster/sync_test.go
@@ -215,6 +215,23 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 			defaultInstances: 1,
 			check: objectsAreSaved,
 		},
+		{
+			subTest: 
"create both master and replica", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: clusterMissingObjects, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreSaved, + }, { subTest: "delete if not needed", oldSpec: &acidv1.Postgresql{ @@ -234,7 +251,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) { subTest: "delete only master if not needed", oldSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(true), }, }, newSpec: &acidv1.Postgresql{ From 3ba73fbcb7031b4ec8abfaf2f54f672ebf57b4b3 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 23 Sep 2020 17:09:36 +0200 Subject: [PATCH 19/40] Add and update tests --- pkg/cluster/resources_test.go | 3 +- pkg/cluster/sync.go | 9 +++- pkg/cluster/sync_test.go | 91 ++++++++++++++++++++++++++--------- 3 files changed, 76 insertions(+), 27 deletions(-) diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index 60755b909..810119c89 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -45,7 +45,8 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { } cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), } poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 4e2805a3f..c4a788970 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -912,8 +912,13 @@ func (c *Cluster) syncConnectionPooler(oldSpec, otherRole = Master } } - if err = 
c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if c.ConnectionPooler != nil && + (c.ConnectionPooler.Deployment[role] != nil || + c.ConnectionPooler.Service[role] != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } } if c.ConnectionPooler != nil && c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { c.ConnectionPooler = nil diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index fa93cf45a..491329265 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -34,21 +34,51 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { return fmt.Errorf("Connection pooler resources are empty") } - for _, role := range cluster.RolesConnectionPooler() { - if role != "" { - if cluster.ConnectionPooler.Deployment[role] == nil { - return fmt.Errorf("Deployment was not saved %s", role) - } - - if cluster.ConnectionPooler.Service[role] == nil { - return fmt.Errorf("Service was not saved %s", role) - } + for _, role := range []PostgresRole{Master, Replica} { + if cluster.ConnectionPooler.Deployment[role] == nil { + return fmt.Errorf("Deployment was not saved %s", role) + } + + if cluster.ConnectionPooler.Service[role] == nil { + return fmt.Errorf("Service was not saved %s", role) } } return nil } +func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + if cluster.ConnectionPooler.Deployment[Master] == nil { + return fmt.Errorf("Deployment was not saved") + } + + if cluster.ConnectionPooler.Service[Master] == nil { + return fmt.Errorf("Service was not saved") + } + + return nil +} + +func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { 
+ return fmt.Errorf("Connection pooler resources are empty") + } + + if cluster.ConnectionPooler.Deployment[Replica] == nil { + return fmt.Errorf("Deployment was not saved") + } + + if cluster.ConnectionPooler.Service[Replica] == nil { + return fmt.Errorf("Service was not saved") + } + + return nil +} + func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { if cluster.ConnectionPooler != nil { return fmt.Errorf("Connection pooler was not deleted") @@ -59,22 +89,18 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range cluster.RolesConnectionPooler() { - if cluster.ConnectionPooler != nil && - (cluster.ConnectionPooler.Deployment[role] != nil && cluster.ConnectionPooler.Service[role] != nil) { - return fmt.Errorf("Connection pooler master was not deleted") - } + if cluster.ConnectionPooler != nil && + (cluster.ConnectionPooler.Deployment[Master] != nil || cluster.ConnectionPooler.Service[Master] != nil) { + return fmt.Errorf("Connection pooler master was not deleted") } return nil } func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range cluster.RolesConnectionPooler() { - if cluster.ConnectionPooler != nil && - (cluster.ConnectionPooler.Deployment[role] != nil && cluster.ConnectionPooler.Service[role] != nil) { - return fmt.Errorf("Connection pooler replica was not deleted") - } + if cluster.ConnectionPooler != nil && + (cluster.ConnectionPooler.Deployment[Replica] != nil || cluster.ConnectionPooler.Service[Replica] != nil) { + return fmt.Errorf("Connection pooler replica was not deleted") } return nil } @@ -141,6 +167,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { clusterDirtyMock.ConnectionPooler.Deployment[Replica] = &appsv1.Deployment{} clusterDirtyMock.ConnectionPooler.Service[Replica] = &v1.Service{} + clusterNewDefaultsMock := newCluster() 
clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() @@ -168,7 +195,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { cluster: clusterMissingObjects, defaultImage: "pooler:1.0", defaultInstances: 1, - check: objectsAreSaved, + check: MasterobjectsAreSaved, }, { subTest: "create if doesn't exist with a flag", @@ -183,7 +210,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { cluster: clusterMissingObjects, defaultImage: "pooler:1.0", defaultInstances: 1, - check: objectsAreSaved, + check: MasterobjectsAreSaved, }, { subTest: "create replica if doesn't exist with a flag", @@ -192,13 +219,29 @@ func TestConnectionPoolerSynchronization(t *testing.T) { }, newSpec: &acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, EnableReplicaConnectionPooler: boolToPointer(true), }, }, - cluster: clusterReplicaDirtyMock, + cluster: clusterDirtyMock, defaultImage: "pooler:1.0", defaultInstances: 1, - check: objectsAreSaved, + check: ReplicaobjectsAreSaved, + }, + { + subTest: "create no replica with flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + }, + }, + cluster: clusterDirtyMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, }, { subTest: "create from scratch", @@ -213,7 +256,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { cluster: clusterMissingObjects, defaultImage: "pooler:1.0", defaultInstances: 1, - check: objectsAreSaved, + check: MasterobjectsAreSaved, }, { subTest: "create both master and replica", From bb51abadb8e5dada3891644ab42129db14a2b0e7 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 5 Oct 2020 12:06:58 +0200 Subject: [PATCH 20/40] Cleanup deleteConnectionPooler --- pkg/cluster/resources.go | 56 +++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 30 deletions(-) diff 
--git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index bb4c8a7b4..717a7f45f 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -180,52 +180,48 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Clean up the deployment object. If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate - var deploymentName string var deployment *appsv1.Deployment deployment = c.ConnectionPooler.Deployment[role] - if deployment != nil { - deploymentName = deployment.Name - } - - // set delete propagation policy to foreground, so that replica set will be - // also deleted. policy := metav1.DeletePropagationForeground options := metav1.DeleteOptions{PropagationPolicy: &policy} - err = c.KubeClient. - Deployments(c.Namespace). - Delete(context.TODO(), deploymentName, options) - if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler deployment was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete deployment: %v", err) - } + if deployment != nil { + + // set delete propagation policy to foreground, so that replica set will be + // also deleted. + + err = c.KubeClient. + Deployments(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler deployment was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete deployment: %v", err) + } - c.logger.Infof("Connection pooler deployment %q has been deleted", deploymentName) + c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) + } // Repeat the same for the service object var service *v1.Service service = c.ConnectionPooler.Service[role] - serviceName := c.connectionPoolerName(role) - if service != nil { - serviceName = service.Name - } - err = c.KubeClient. - Services(c.Namespace). 
- Delete(context.TODO(), serviceName, options) + err = c.KubeClient. + Services(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) - if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler service was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete service: %v", err) - } - - c.logger.Infof("Connection pooler service %q has been deleted", serviceName) + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler service was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete service: %v", err) + } + c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) + } // Repeat the same for the secret object secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) From 84fbfe3063996606039b334db713eec3a3d110ed Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 5 Oct 2020 12:10:02 +0200 Subject: [PATCH 21/40] Add labels --- pkg/cluster/util.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index b9ce6dbe9..f00086e50 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -424,8 +424,10 @@ func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.Labe connectionPoolerLabels := labels.Set(map[string]string{}) extraLabels := labels.Set(map[string]string{ - "connection-pooler": c.connectionPoolerName(role), - "application": "db-connection-pooler", + "connection-pooler-name": c.connectionPoolerName(role), + "application": "db-connection-pooler", + "role": string(role), + "cluster-name": c.ClusterName, }) connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) From c63446c4140b22067e9ed9b9c9cd9e16abfb7cfa Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 5 Oct 2020 12:48:54 +0200 Subject: [PATCH 22/40] First commit --- pkg/cluster/cluster.go | 27 ++++++------- pkg/cluster/database.go | 4 +- 
pkg/cluster/resources.go | 86 ++++++++++++++++++----------------------
 pkg/cluster/sync.go | 36 ++++++++---------
 pkg/cluster/types.go | 2 +-
 5 files changed, 73 insertions(+), 82 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index adf6e30e8..940af7e5b 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -55,8 +55,8 @@ type Config struct {
 
 // K8S objects that are belongs to a connection pooler
 type ConnectionPoolerObjects struct {
-	Deployment map[PostgresRole]*appsv1.Deployment
-	Service    map[PostgresRole]*v1.Service
+	Deployment *appsv1.Deployment
+	Service    *v1.Service
 
 	// It could happen that a connection pooler was enabled, but the operator
 	// was not able to properly process a corresponding event or was restarted.
@@ -72,7 +72,6 @@ type kubeResources struct {
 	Endpoints           map[PostgresRole]*v1.Endpoints
 	Secrets             map[types.UID]*v1.Secret
 	Statefulset         *appsv1.StatefulSet
-	ConnectionPooler    *ConnectionPoolerObjects
 	PodDisruptionBudget *policybeta1.PodDisruptionBudget
 	//Pods are treated separately
 	//PVCs are treated separately
@@ -102,7 +101,7 @@ type Cluster struct {
 	currentProcess Process
 	processMu      sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex
 	specMu         sync.RWMutex // protects the spec for reporting, no need to hold the master mutex
-
+	ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects
 }
 
 type compareStatefulsetResult struct {
@@ -346,19 +345,19 @@ func (c *Cluster) Create() error {
 	//
 	// Do not consider connection pooler as a strict requirement, and if
 	// something fails, report warning
-	roles := c.RolesConnectionPooler()
-	for _, r := range roles {
-		if c.ConnectionPooler != nil {
+	for _, r := range c.RolesConnectionPooler() {
+		if c.ConnectionPooler[r] != nil {
 			c.logger.Warning("Connection pooler already exists in the cluster")
 			return nil
+		}
+
+		connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r)
+		if err != nil {
+			c.logger.Warningf("could not create connection pooler: %v", err)
+			return nil
+		}
+		c.logger.Infof("connection pooler %q has been successfully created for the role %v",
+			util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r)
-		}
-		connectionPooler, err := c.createConnectionPooler(c.installLookupFunction)
-		if err != nil {
-			c.logger.Warningf("could not create connection pooler: %v", err)
-			return nil
-		}
-		c.logger.Infof("connection pooler %q has been successfully created",
-			util.NameFromMeta(connectionPooler.Deployment[r].ObjectMeta))
 	}
 
 	return nil
diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go
index 1a38bd41d..d12360c27 100644
--- a/pkg/cluster/database.go
+++ b/pkg/cluster/database.go
@@ -463,7 +463,7 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi
 
 // Creates a connection pool credentials lookup function in every database to
 // perform remote authentication.
-func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
+func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error {
 	var stmtBytes bytes.Buffer
 	c.logger.Info("Installing lookup function")
@@ -542,6 +542,6 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
 		}
 	}
 
-	c.ConnectionPooler.LookupFunction = true
+	c.ConnectionPooler[role].LookupFunction = true
 	return nil
 }
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index 717a7f45f..419013339 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -101,16 +101,10 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
 //
 // After that create all the objects for connection pooler, namely a deployment
 // with a chosen pooler and a service to expose it.
-func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoolerObjects, error) {
+func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) {
 	var msg string
 	c.setProcessName("creating connection pooler")
 
-	if c.ConnectionPooler == nil {
-		c.ConnectionPooler = &ConnectionPoolerObjects{}
-		c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment)
-		c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service)
-	}
-
 	schema := c.Spec.ConnectionPooler.Schema
 
 	if schema == "" {
@@ -122,49 +116,52 @@ func (c *Cluster) createConnectionPoo
 		user = c.OpConfig.ConnectionPooler.User
 	}
 
+	// the pooler map and this role's entry must exist before lookup() runs,
+	// because installLookupFunction writes c.ConnectionPooler[role].LookupFunction
+	if c.ConnectionPooler == nil {
+		c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects)
+	}
+	if c.ConnectionPooler[role] == nil {
+		c.ConnectionPooler[role] = &ConnectionPoolerObjects{}
+	}
+
-	err := lookup(schema, user)
+	err := lookup(schema, user, role)
 
 	if err != nil {
 		msg = "could not prepare database for connection pooler: %v"
 		return nil, fmt.Errorf(msg, err)
 	}
 
-	if c.needConnectionPooler() {
-		roles := c.RolesConnectionPooler()
-		for _, r := range roles {
-			deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, r)
-			if err != nil {
-				msg = "could not generate deployment for connection pooler: %v"
-				return nil, fmt.Errorf(msg, err)
-			}
-
-			// client-go does retry 10 times (with NoBackoff by default) when the API
-			// believe a request can be retried and returns Retry-After header. This
-			// should be good enough to not think about it here.
-			deployment, err := c.KubeClient.
-				Deployments(deploymentSpec.Namespace).
-				Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})
+	deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role)
+	if err != nil {
+		msg = "could not generate deployment for connection pooler: %v"
+		return nil, fmt.Errorf(msg, err)
+	}
-			if err != nil {
-				return nil, err
-			}
+	// client-go does retry 10 times (with NoBackoff by default) when the API
+	// believe a request can be retried and returns Retry-After header. This
+	// should be good enough to not think about it here.
+	deployment, err := c.KubeClient.
+		Deployments(deploymentSpec.Namespace).
+		Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})
-			serviceSpec := c.generateConnectionPoolerService(&c.Spec, r)
-			service, err := c.KubeClient.
-				Services(serviceSpec.Namespace).
-				Create(context.TODO(), serviceSpec, metav1.CreateOptions{})
+	if err != nil {
+		return nil, err
+	}
-			if err != nil {
-				return nil, err
-			}
+	serviceSpec := c.generateConnectionPoolerService(&c.Spec, role)
+	service, err := c.KubeClient.
+		Services(serviceSpec.Namespace).
+		Create(context.TODO(), serviceSpec, metav1.CreateOptions{})
+	if err != nil {
+		return nil, err
+	}
+	c.ConnectionPooler[role].Deployment = deployment
+	c.ConnectionPooler[role].Service = service
+
+	c.logger.Debugf("created new connection pooler %q, uid: %q",
+		util.NameFromMeta(deployment.ObjectMeta), deployment.UID)
-	return c.ConnectionPooler, nil
+	return c.ConnectionPooler[role], nil
 }
 
 func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
@@ -181,7 +173,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 
 	// Clean up the deployment object.
If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate var deployment *appsv1.Deployment - deployment = c.ConnectionPooler.Deployment[role] + deployment = c.ConnectionPooler[role].Deployment policy := metav1.DeletePropagationForeground options := metav1.DeleteOptions{PropagationPolicy: &policy} @@ -206,7 +198,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Repeat the same for the service object var service *v1.Service - service = c.ConnectionPooler.Service[role] + service = c.ConnectionPooler[role].Service if service != nil { @@ -861,7 +853,7 @@ func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { // the check were already done before. func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment[role] == nil { + if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } @@ -874,9 +866,9 @@ func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeploym // worker at one time will try to update it chances of conflicts are // minimal. deployment, err := c.KubeClient. 
- Deployments(c.ConnectionPooler.Deployment[role].Namespace).Patch( + Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler.Deployment[role].Name, + c.ConnectionPooler[role].Deployment.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, @@ -885,7 +877,7 @@ func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeploym return nil, fmt.Errorf("could not patch deployment: %v", err) } - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment return deployment, nil } @@ -897,9 +889,9 @@ func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]strin if err != nil { return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) } - result, err := c.KubeClient.Deployments(c.ConnectionPooler.Deployment[role].Namespace).Patch( + result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler.Deployment[role].Name, + c.ConnectionPooler[role].Deployment.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}, diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index c4a788970..da868fd30 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -857,9 +857,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) } if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} - c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) - c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) + //c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) + //c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) } if newNeedConnectionPooler { @@ -872,7 +872,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, // in this case also 
do not forget to install lookup function as for // creating cluster - if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction { + if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { newConnectionPooler := newSpec.Spec.ConnectionPooler specSchema := "" @@ -891,7 +891,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, specUser, c.OpConfig.ConnectionPooler.User) - if err = lookup(schema, user); err != nil { + if err = lookup(schema, user, role); err != nil { return NoSync, err } } @@ -913,14 +913,14 @@ func (c *Cluster) syncConnectionPooler(oldSpec, } } if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment[role] != nil || - c.ConnectionPooler.Service[role] != nil) { + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } } - if c.ConnectionPooler != nil && c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { + if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { c.ConnectionPooler = nil } } @@ -936,13 +936,13 @@ func (c *Cluster) syncConnectionPooler(oldSpec, } } if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment[role] != nil || - c.ConnectionPooler.Service[role] != nil) { + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { if err = c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } - } else if c.ConnectionPooler.Deployment[otherRole] == nil && c.ConnectionPooler.Service[otherRole] == nil { + } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { c.ConnectionPooler = nil } } @@ -979,12 +979,12 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if 
err != nil { return NoSync, err } - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler @@ -1018,7 +1018,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return reason, fmt.Errorf(msg, err) } - oldDeploymentSpec := c.ConnectionPooler.Deployment[role] + oldDeploymentSpec := c.ConnectionPooler[role].Deployment deployment, err := c.updateConnectionPoolerDeployment( oldDeploymentSpec, @@ -1028,13 +1028,13 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return reason, err } - c.ConnectionPooler.Deployment[role] = deployment + c.ConnectionPooler[role].Deployment = deployment return reason, nil } } - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler.Deployment[role].Annotations) + newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) if newAnnotations != nil { c.updateConnectionPoolerAnnotations(newAnnotations, role) } @@ -1055,14 +1055,14 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return NoSync, err } - c.ConnectionPooler.Service[role] = service + c.ConnectionPooler[role].Service = service } else if err != nil { msg := "could not get connection pooler service to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler.Service[role] = service + c.ConnectionPooler[role].Service = service } return NoSync, nil diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go index 199914ccc..8aa519817 100644 --- a/pkg/cluster/types.go +++ 
b/pkg/cluster/types.go @@ -72,7 +72,7 @@ type ClusterStatus struct { type TemplateParams map[string]interface{} -type InstallFunction func(schema string, user string) error +type InstallFunction func(schema string, user string, role PostgresRole) error type SyncReason []string From ae3f5eff21eec4e380c2edacc3e8a7d3bf4c687a Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 6 Oct 2020 16:39:12 +0200 Subject: [PATCH 23/40] Make connection_pooler a separate package --- pkg/cluster/k8sres.go | 102 ---- pkg/cluster/resources.go | 193 ------ pkg/cluster/sync.go | 229 ------- pkg/connection_pooler/connection_pooler.go | 666 +++++++++++++++++++++ 4 files changed, 666 insertions(+), 524 deletions(-) create mode 100644 pkg/connection_pooler/connection_pooler.go diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index ecbe03212..c5bef5102 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2266,108 +2266,6 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference { } } -func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( - *appsv1.Deployment, error) { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. 
- if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - if *numberOfInstances < constants.ConnectionPoolerMinInstances { - msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) - - *numberOfInstances = constants.ConnectionPoolerMinInstances - } - - if err != nil { - return nil, err - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this deployment, but there is a hope that this object - // will be garbage collected if something went wrong and operator - // didn't deleted it. - OwnerReferences: c.ownerReferences(), - }, - Spec: appsv1.DeploymentSpec{ - Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(role), - Template: *podTemplate, - }, - } - - return deployment, nil -} - -func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. 
It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. - if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - serviceSpec := v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName(role), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(role), - }, - } - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this service, but there is a hope that this object will - // be garbage collected if something went wrong and operator didn't - // deleted it. - OwnerReferences: c.ownerReferences(), - }, - Spec: serviceSpec, - } - - return service -} - func ensurePath(file string, defaultDir string, defaultFile string) string { if file == "" { return path.Join(defaultDir, defaultFile) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 419013339..fc06bad08 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -94,145 +94,6 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { return statefulSet, nil } -// Prepare the database for connection pooler to be used, i.e. install lookup -// function (do it first, because it should be fast and if it didn't succeed, -// it doesn't makes sense to create more K8S objects. At this moment we assume -// that necessary connection pooler user exists. 
-// -// After that create all the objects for connection pooler, namely a deployment -// with a chosen pooler and a service to expose it. -func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { - var msg string - c.setProcessName("creating connection pooler") - - schema := c.Spec.ConnectionPooler.Schema - - if schema == "" { - schema = c.OpConfig.ConnectionPooler.Schema - } - - user := c.Spec.ConnectionPooler.User - if user == "" { - user = c.OpConfig.ConnectionPooler.User - } - - err := lookup(schema, user, role) - - if err != nil { - msg = "could not prepare database for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - if c.ConnectionPooler[role] == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - } - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - c.ConnectionPooler[role].Deployment = deployment - c.ConnectionPooler[role].Service = service - - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - - return c.ConnectionPooler[role], nil -} - -func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { - c.setProcessName("deleting connection pooler") - c.logger.Debugln("deleting connection pooler") - - // Lack of connection pooler objects is not a fatal error, just log it if - // it was present before in the manifest - if c.ConnectionPooler == nil { - c.logger.Infof("No connection pooler to delete") - return nil - } - - // Clean up the deployment object. If deployment resource we've remembered - // is somehow empty, try to delete based on what would we generate - var deployment *appsv1.Deployment - deployment = c.ConnectionPooler[role].Deployment - - policy := metav1.DeletePropagationForeground - options := metav1.DeleteOptions{PropagationPolicy: &policy} - - if deployment != nil { - - // set delete propagation policy to foreground, so that replica set will be - // also deleted. - - err = c.KubeClient. - Deployments(c.Namespace). - Delete(context.TODO(), c.connectionPoolerName(role), options) - - if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler deployment was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete deployment: %v", err) - } - - c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) - } - - // Repeat the same for the service object - var service *v1.Service - service = c.ConnectionPooler[role].Service - - if service != nil { - - err = c.KubeClient. - Services(c.Namespace). 
- Delete(context.TODO(), c.connectionPoolerName(role), options) - - if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler service was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete service: %v", err) - } - - c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) - } - // Repeat the same for the secret object - secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) - - secret, err := c.KubeClient. - Secrets(c.Namespace). - Get(context.TODO(), secretName, metav1.GetOptions{}) - - if err != nil { - c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) - } else { - if err = c.deleteSecret(secret.UID, *secret); err != nil { - return fmt.Errorf("could not delete pooler secret: %v", err) - } - } - - c.ConnectionPooler = nil - return nil -} - func getPodIndex(podName string) (int32, error) { parts := strings.Split(podName, "-") if len(parts) == 0 { @@ -848,57 +709,3 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet { func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { return c.PodDisruptionBudget } - -// Perform actual patching of a connection pooler deployment, assuming that all -// the check were already done before. -func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { - c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { - return nil, fmt.Errorf("there is no connection pooler in the cluster") - } - - patchData, err := specPatch(newDeployment.Spec) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment: %v", err) - } - - // An update probably requires RetryOnConflict, but since only one operator - // worker at one time will try to update it chances of conflicts are - // minimal. 
- deployment, err := c.KubeClient. - Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( - context.TODO(), - c.ConnectionPooler[role].Deployment.Name, - types.MergePatchType, - patchData, - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch deployment: %v", err) - } - - c.ConnectionPooler[role].Deployment = deployment - - return deployment, nil -} - -//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { - c.logger.Debugf("updating connection pooler annotations") - patchData, err := metaAnnotationsPatch(annotations) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) - } - result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( - context.TODO(), - c.ConnectionPooler[role].Deployment.Name, - types.MergePatchType, - []byte(patchData), - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) - } - return result, nil - -} diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index da868fd30..49408fbf7 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -838,232 +838,3 @@ func (c *Cluster) syncLogicalBackupJob() error { return nil } - -func (c *Cluster) syncConnectionPooler(oldSpec, - newSpec *acidv1.Postgresql, - lookup InstallFunction) (SyncReason, error) { - - var reason SyncReason - var err error - var newNeedConnectionPooler, oldNeedConnectionPooler bool - - // Check and perform the sync requirements for each of the roles. 
- for _, role := range [2]PostgresRole{Master, Replica} { - if role == Master { - newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) - } else { - newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) - } - if c.ConnectionPooler == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - //c.ConnectionPooler.Deployment = make(map[PostgresRole]*appsv1.Deployment) - //c.ConnectionPooler.Service = make(map[PostgresRole]*v1.Service) - } - - if newNeedConnectionPooler { - // Try to sync in any case. If we didn't needed connection pooler before, - // it means we want to create it. If it was already present, still sync - // since it could happen that there is no difference in specs, and all - // the resources are remembered, but the deployment was manually deleted - // in between - c.logger.Debug("syncing connection pooler") - - // in this case also do not forget to install lookup function as for - // creating cluster - if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { - newConnectionPooler := newSpec.Spec.ConnectionPooler - - specSchema := "" - specUser := "" - - if newConnectionPooler != nil { - specSchema = newConnectionPooler.Schema - specUser = newConnectionPooler.User - } - - schema := util.Coalesce( - specSchema, - c.OpConfig.ConnectionPooler.Schema) - - user := util.Coalesce( - specUser, - c.OpConfig.ConnectionPooler.User) - - if err = lookup(schema, user, role); err != nil { - return NoSync, err - } - } - - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - return reason, err - } - } - - if oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources - otherRole := role - if 
len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } - if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil - } - } - - if !oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources if not empty - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil - } - } - } - - return reason, nil -} - -// Synchronize connection pooler resources. Effectively we're interested only in -// synchronizing the corresponding deployment, but in case of deployment or -// service is missing, create it. After checking, also remember an object for -// the future references. -func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( - SyncReason, error) { - - deployment, err := c.KubeClient. - Deployments(c.Namespace). 
- Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return NoSync, fmt.Errorf(msg, err) - } - - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - c.ConnectionPooler[role].Deployment = deployment - } else if err != nil { - msg := "could not get connection pooler deployment to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - c.ConnectionPooler[role].Deployment = deployment - - // actual synchronization - oldConnectionPooler := oldSpec.Spec.ConnectionPooler - newConnectionPooler := newSpec.Spec.ConnectionPooler - - // sync implementation below assumes that both old and new specs are - // not nil, but it can happen. To avoid any confusion like updating a - // deployment because the specification changed from nil to an empty - // struct (that was initialized somewhere before) replace any nil with - // an empty spec. - if oldConnectionPooler == nil { - oldConnectionPooler = &acidv1.ConnectionPooler{} - } - - if newConnectionPooler == nil { - newConnectionPooler = &acidv1.ConnectionPooler{} - } - - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - - specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) - reason := append(specReason, defaultsReason...) 
- - if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), reason) - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg := "could not generate deployment for connection pooler: %v" - return reason, fmt.Errorf(msg, err) - } - - oldDeploymentSpec := c.ConnectionPooler[role].Deployment - - deployment, err := c.updateConnectionPoolerDeployment( - oldDeploymentSpec, - newDeploymentSpec, - role) - - if err != nil { - return reason, err - } - c.ConnectionPooler[role].Deployment = deployment - - return reason, nil - } - } - - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) - if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations, role) - } - - service, err := c.KubeClient. - Services(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Service %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) - - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - c.ConnectionPooler[role].Service = service - - } else if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler[role].Service = service - } - - return NoSync, nil -} diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go new file mode 100644 index 000000000..8a218ce7f --- /dev/null +++ b/pkg/connection_pooler/connection_pooler.go @@ -0,0 +1,666 @@ +package connection_pooler + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/r3labs/diff" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/cluster" + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" +) + +// K8S objects that are belongs to a connection pooler +type ConnectionPoolerObjects struct { + Deployment *appsv1.Deployment + Service *v1.Service + Name string + // It could happen that a connection pooler was enabled, but the operator + // was not able to properly process a corresponding event or was restarted. + // In this case we will miss missing/require situation and a lookup function + // will not be installed. To avoid synchronizing it all the time to prevent + // this, we can remember the result in memory at least until the next + // restart. + LookupFunction bool +} + +// Prepare the database for connection pooler to be used, i.e. 
install lookup +// function (do it first, because it should be fast and if it didn't succeed, +// it doesn't makes sense to create more K8S objects. At this moment we assume +// that necessary connection pooler user exists. +// +// After that create all the objects for connection pooler, namely a deployment +// with a chosen pooler and a service to expose it. + +// have connectionpooler name in the cp object to have it immutable name +// add these cp related functions to a new cp file +// opConfig, cluster, and database name +func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.InstallFunction, role cluster.PostgresRole, c cluster.Cluster) (*ConnectionPoolerObjects, error) { + var msg string + c.setProcessName("creating connection pooler") + + schema := c.Spec.ConnectionPooler.Schema + + if schema == "" { + schema = c.OpConfig.ConnectionPooler.Schema + } + + user := c.Spec.ConnectionPooler.User + if user == "" { + user = c.OpConfig.ConnectionPooler.User + } + + err := c.lookup(schema, user, role) + + if err != nil { + msg = "could not prepare database for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + if c.ConnectionPooler[role] == nil { + c.ConnectionPooler = make(map[c.PostgresRole]*ConnectionPoolerObjects) + c.ConnectionPooler[role].Deployment = nil + c.ConnectionPooler[role].Service = nil + c.ConnectionPooler[role].LookupFunction = false + } + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). 
+ Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + + serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). + Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment + c.ConnectionPooler[role].Service = service + + c.logger.Debugf("created new connection pooler %q, uid: %q", + util.NameFromMeta(deployment.ObjectMeta), deployment.UID) + + return c.ConnectionPooler[role], nil +} + +func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) ( + *appsv1.Deployment, error) { + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. 
+ if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + if *numberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "Adjusted number of connection pooler instances from %d to %d" + c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + + *numberOfInstances = constants.ConnectionPoolerMinInstances + } + + if err != nil { + return nil, err + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName(role), + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this deployment, but there is a hope that this object + // will be garbage collected if something went wrong and operator + // didn't deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: numberOfInstances, + Selector: c.connectionPoolerLabelsSelector(role), + Template: *podTemplate, + }, + } + + return deployment, nil +} + +func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) *v1.Service { + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. 
It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. + if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + serviceSpec := v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName(role), + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, + }, + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(role), + }, + } + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName(role), + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this service, but there is a hope that this object will + // be garbage collected if something went wrong and operator didn't + // deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: serviceSpec, + } + + return service +} + +// delete connection pooler +func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role cluster.PostgresRole, c cluster.Cluster) (err error) { + c.setProcessName("deleting connection pooler") + c.logger.Debugln("deleting connection pooler") + + // Lack of connection pooler objects is not a fatal error, just log it if + // it was present before in the manifest + if c.ConnectionPooler == nil { + c.logger.Infof("No connection pooler to delete") + return nil + } + + // Clean up the deployment object. 
If deployment resource we've remembered + // is somehow empty, try to delete based on what would we generate + var deployment *appsv1.Deployment + deployment = c.ConnectionPooler[role].Deployment + + policy := metav1.DeletePropagationForeground + options := metav1.DeleteOptions{PropagationPolicy: &policy} + + if deployment != nil { + + // set delete propagation policy to foreground, so that replica set will be + // also deleted. + + err = c.KubeClient. + Deployments(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler deployment was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete deployment: %v", err) + } + + c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) + } + + // Repeat the same for the service object + var service *v1.Service + service = c.ConnectionPooler[role].Service + + if service != nil { + + err = c.KubeClient. + Services(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler service was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete service: %v", err) + } + + c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) + } + // Repeat the same for the secret object + secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) + + secret, err := c.KubeClient. + Secrets(c.Namespace). 
+ Get(context.TODO(), secretName, metav1.GetOptions{}) + + if err != nil { + c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) + } else { + if err = c.deleteSecret(secret.UID, *secret); err != nil { + return fmt.Errorf("could not delete pooler secret: %v", err) + } + } + + c.ConnectionPooler = nil + return nil +} + +// Perform actual patching of a connection pooler deployment, assuming that all +// the check were already done before. +func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { + c.setProcessName("updating connection pooler") + if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { + return nil, fmt.Errorf("there is no connection pooler in the cluster") + } + + patchData, err := specPatch(newDeployment.Spec) + if err != nil { + return nil, fmt.Errorf("could not form patch for the deployment: %v", err) + } + + // An update probably requires RetryOnConflict, but since only one operator + // worker at one time will try to update it chances of conflicts are + // minimal. + deployment, err := c.KubeClient. 
+ Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + context.TODO(), + c.ConnectionPooler[role].Deployment.Name, + types.MergePatchType, + patchData, + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch deployment: %v", err) + } + + c.ConnectionPooler[role].Deployment = deployment + + return deployment, nil +} + +//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment +func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { + c.logger.Debugf("updating connection pooler annotations") + patchData, err := metaAnnotationsPatch(annotations) + if err != nil { + return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) + } + result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + context.TODO(), + c.ConnectionPooler[role].Deployment.Name, + types.MergePatchType, + []byte(patchData), + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) + } + return result, nil + +} + +//sync connection pooler + +// Test if two connection pooler configuration needs to be synced. For simplicity +// compare not the actual K8S objects, but the configuration itself and request +// sync if there is any difference. 
+func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, c cluster.Cluster) (sync bool, reasons []string) { + reasons = []string{} + sync = false + + changelog, err := diff.Diff(oldSpec, newSpec) + if err != nil { + c.logger.Infof("Cannot get diff, do not do anything, %+v", err) + return false, reasons + } + + if len(changelog) > 0 { + sync = true + } + + for _, change := range changelog { + msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", + change.Type, change.Path, change.From, change.To) + reasons = append(reasons, msg) + } + + return sync, reasons +} + +// Check if we need to synchronize connection pooler deployment due to new +// defaults, that are different from what we see in the DeploymentSpec +func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment, c cluster.Cluster) (sync bool, reasons []string) { + + reasons = []string{} + sync = false + + config := c.OpConfig.ConnectionPooler + podTemplate := deployment.Spec.Template + poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] + + if spec == nil { + spec = &acidv1.ConnectionPooler{} + } + + if spec.NumberOfInstances == nil && + *deployment.Spec.Replicas != *config.NumberOfInstances { + + sync = true + msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", + *deployment.Spec.Replicas, *config.NumberOfInstances) + reasons = append(reasons, msg) + } + + if spec.DockerImage == "" && + poolerContainer.Image != config.Image { + + sync = true + msg := fmt.Sprintf("DockerImage is different (having %s, required %s)", + poolerContainer.Image, config.Image) + reasons = append(reasons, msg) + } + + expectedResources, err := generateResourceRequirements(spec.Resources, + c.makeDefaultConnectionPoolerResources()) + + // An error to generate expected resources means something is not quite + // right, but for the purpose of robustness do not panic 
here, just report + // and ignore resources comparison (in the worst case there will be no + // updates for new resource values). + if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + sync = true + msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", + poolerContainer.Resources, expectedResources) + reasons = append(reasons, msg) + } + + if err != nil { + c.logger.Warningf("Cannot generate expected resources, %v", err) + } + + for _, env := range poolerContainer.Env { + if spec.User == "" && env.Name == "PGUSER" { + ref := env.ValueFrom.SecretKeyRef.LocalObjectReference + + if ref.Name != c.credentialSecretName(config.User) { + sync = true + msg := fmt.Sprintf("pooler user is different (having %s, required %s)", + ref.Name, config.User) + reasons = append(reasons, msg) + } + } + + if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { + sync = true + msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", + env.Value, config.Schema) + reasons = append(reasons, msg) + } + } + + return sync, reasons +} + +func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup cluster.InstallFunction, c cluster.Cluster) (SyncReason, error) { + + var reason SyncReason + var err error + var newNeedConnectionPooler, oldNeedConnectionPooler bool + + // Check and perform the sync requirements for each of the roles. 
+ for _, role := range [2]PostgresRole{Master, Replica} { + if role == Master { + newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + } else { + newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + } + if c.ConnectionPooler == nil { + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) + c.ConnectionPooler[role].Deployment = nil + c.ConnectionPooler[role].Service = nil + c.ConnectionPooler[role].LookupFunction = false + } + + if newNeedConnectionPooler { + // Try to sync in any case. If we didn't needed connection pooler before, + // it means we want to create it. If it was already present, still sync + // since it could happen that there is no difference in specs, and all + // the resources are remembered, but the deployment was manually deleted + // in between + c.logger.Debug("syncing connection pooler for the role %v", role) + + // in this case also do not forget to install lookup function as for + // creating cluster + if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { + newConnectionPooler := newSpec.Spec.ConnectionPooler + + specSchema := "" + specUser := "" + + if newConnectionPooler != nil { + specSchema = newConnectionPooler.Schema + specUser = newConnectionPooler.User + } + + schema := util.Coalesce( + specSchema, + c.OpConfig.ConnectionPooler.Schema) + + user := util.Coalesce( + specUser, + c.OpConfig.ConnectionPooler.User) + + if err = lookup(schema, user, role); err != nil { + return NoSync, err + } + } + + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + return reason, err + } + } + + if oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources + otherRole := role + if 
len(c.RolesConnectionPooler()) == 2 { + if role == Master { + otherRole = Replica + } else { + otherRole = Master + } + } + if c.ConnectionPooler != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { + c.ConnectionPooler = nil + } + } + + if !oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources if not empty + otherRole := role + if len(c.RolesConnectionPooler()) == 2 { + if role == Master { + otherRole = Replica + } else { + otherRole = Master + } + } + if c.ConnectionPooler != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { + c.ConnectionPooler = nil + } + } + } + + return reason, nil +} + +// Synchronize connection pooler resources. Effectively we're interested only in +// synchronizing the corresponding deployment, but in case of deployment or +// service is missing, create it. After checking, also remember an object for +// the future references. +func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role cluster.PostgresRole, c cluster.Cluster) ( + SyncReason, error) { + + deployment, err := c.KubeClient. + Deployments(c.Namespace). 
+ Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return NoSync, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Deployment = deployment + } else if err != nil { + msg := "could not get connection pooler deployment to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + c.ConnectionPooler[role].Deployment = deployment + + // actual synchronization + oldConnectionPooler := oldSpec.Spec.ConnectionPooler + newConnectionPooler := newSpec.Spec.ConnectionPooler + + // sync implementation below assumes that both old and new specs are + // not nil, but it can happen. To avoid any confusion like updating a + // deployment because the specification changed from nil to an empty + // struct (that was initialized somewhere before) replace any nil with + // an empty spec. + if oldConnectionPooler == nil { + oldConnectionPooler = &acidv1.ConnectionPooler{} + } + + if newConnectionPooler == nil { + newConnectionPooler = &acidv1.ConnectionPooler{} + } + + c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + + specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + reason := append(specReason, defaultsReason...) 
+ + if specSync || defaultsSync { + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg := "could not generate deployment for connection pooler: %v" + return reason, fmt.Errorf(msg, err) + } + + oldDeploymentSpec := c.ConnectionPooler[role].Deployment + + deployment, err := c.updateConnectionPoolerDeployment( + oldDeploymentSpec, + newDeploymentSpec, + role) + + if err != nil { + return reason, err + } + c.ConnectionPooler[role].Deployment = deployment + + return reason, nil + } + } + + newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) + if newAnnotations != nil { + c.updateConnectionPoolerAnnotations(newAnnotations, role) + } + + service, err := c.KubeClient. + Services(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Service %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). 
+ Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Service = service + + } else if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler[role].Service = service + } + + return NoSync, nil +} From 4700fc7328275fd9a8ce0362c723a2dd0ffe6b8b Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 6 Oct 2020 21:36:19 +0200 Subject: [PATCH 24/40] Add interface to use common functions --- pkg/cluster/cluster.go | 181 ++----- pkg/cluster/k8sres.go | 201 -------- pkg/cluster/sync.go | 6 +- pkg/cluster/util.go | 58 --- pkg/connection_pooler/connection_pooler.go | 557 ++++++++++++++++----- pkg/pooler_interface/pooler_interface.go | 18 + 6 files changed, 477 insertions(+), 544 deletions(-) create mode 100644 pkg/pooler_interface/pooler_interface.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 940af7e5b..39434b1b6 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -12,7 +12,6 @@ import ( "sync" "time" - "github.com/r3labs/diff" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -25,6 +24,8 @@ import ( "k8s.io/client-go/tools/reference" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + + "github.com/zalando/postgres-operator/pkg/connection_pooler" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -34,6 +35,7 @@ import ( "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" + rbacv1 "k8s.io/api/rbac/v1" ) @@ -53,20 +55,6 @@ type Config struct { PodServiceAccountRoleBinding *rbacv1.RoleBinding } -// K8S 
objects that are belongs to a connection pooler -type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - - // It could happen that a connection pooler was enabled, but the operator - // was not able to properly process a corresponding event or was restarted. - // In this case we will miss missing/require situation and a lookup function - // will not be installed. To avoid synchronizing it all the time to prevent - // this, we can remember the result in memory at least until the next - // restart. - LookupFunction bool -} - type kubeResources struct { Services map[PostgresRole]*v1.Service Endpoints map[PostgresRole]*v1.Endpoints @@ -101,9 +89,8 @@ type Cluster struct { currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex - ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects + ConnectionPooler map[PostgresRole]*connection_pooler.ConnectionPoolerObjects } - type compareStatefulsetResult struct { match bool replace bool @@ -345,12 +332,19 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - for _, r := range c.RolesConnectionPooler() { - if c.ConnectionPooler[r] != nil { - c.logger.Warning("Connection pooler already exists in the cluster") - return nil + if c.needConnectionPooler() { + + roles := c.RolesConnectionPooler() + for _, r := range roles { + c.logger.Warningf("found roles are %v", r) + } - connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) + for _, r := range c.RolesConnectionPooler() { + if c.ConnectionPooler[r] != nil { + c.logger.Warning("Connection pooler already exists in the cluster") + return nil + } + connectionPooler, err := c.ConnectionPooler[r].createConnectionPooler(c.installLookupFunction, r) if err != nil { 
c.logger.Warningf("could not create connection pooler: %v", err) return nil @@ -359,7 +353,6 @@ func (c *Cluster) Create() error { util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r) } } - return nil } @@ -781,15 +774,31 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } // sync connection pooler - if _, err := c.syncConnectionPooler(oldSpec, newSpec, - c.installLookupFunction); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - updateFailed = true + for _, role := range c.RolesConnectionPooler() { + if _, err := c.ConnectionPooler[role].syncConnectionPooler(oldSpec, newSpec, + c.installLookupFunction); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + updateFailed = true + } } return nil } +func syncResources(a, b *v1.ResourceRequirements) bool { + for _, res := range []v1.ResourceName{ + v1.ResourceCPU, + v1.ResourceMemory, + } { + if !a.Limits[res].Equal(b.Limits[res]) || + !a.Requests[res].Equal(b.Requests[res]) { + return true + } + } + + return false +} + // Delete deletes the cluster and cleans up all objects associated with it (including statefulsets). // The deletion order here is somewhat significant, because Patroni, when running with the Kubernetes // DCS, reuses the master's endpoint to store the leader related metadata. If we remove the endpoint @@ -839,7 +848,7 @@ func (c *Cluster) Delete() { // manifest, just to not keep orphaned components in case if something went // wrong for _, role := range [2]PostgresRole{Master, Replica} { - if err := c.deleteConnectionPooler(role); err != nil { + if err := c.ConnectionPooler[role].deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } } @@ -1387,119 +1396,3 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error { return c.deleteClusterObject(get, deleteConfigMapFn, "configmap") } - -// Test if two connection pooler configuration needs to be synced. 
For simplicity -// compare not the actual K8S objects, but the configuration itself and request -// sync if there is any difference. -func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { - reasons = []string{} - sync = false - - changelog, err := diff.Diff(oldSpec, newSpec) - if err != nil { - c.logger.Infof("Cannot get diff, do not do anything, %+v", err) - return false, reasons - } - - if len(changelog) > 0 { - sync = true - } - - for _, change := range changelog { - msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", - change.Type, change.Path, change.From, change.To) - reasons = append(reasons, msg) - } - - return sync, reasons -} - -func syncResources(a, b *v1.ResourceRequirements) bool { - for _, res := range []v1.ResourceName{ - v1.ResourceCPU, - v1.ResourceMemory, - } { - if !a.Limits[res].Equal(b.Limits[res]) || - !a.Requests[res].Equal(b.Requests[res]) { - return true - } - } - - return false -} - -// Check if we need to synchronize connection pooler deployment due to new -// defaults, that are different from what we see in the DeploymentSpec -func (c *Cluster) needSyncConnectionPoolerDefaults( - spec *acidv1.ConnectionPooler, - deployment *appsv1.Deployment) (sync bool, reasons []string) { - - reasons = []string{} - sync = false - - config := c.OpConfig.ConnectionPooler - podTemplate := deployment.Spec.Template - poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] - - if spec == nil { - spec = &acidv1.ConnectionPooler{} - } - - if spec.NumberOfInstances == nil && - *deployment.Spec.Replicas != *config.NumberOfInstances { - - sync = true - msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", - *deployment.Spec.Replicas, *config.NumberOfInstances) - reasons = append(reasons, msg) - } - - if spec.DockerImage == "" && - poolerContainer.Image != config.Image { - - sync = true - msg := fmt.Sprintf("DockerImage is different (having %s, 
required %s)", - poolerContainer.Image, config.Image) - reasons = append(reasons, msg) - } - - expectedResources, err := generateResourceRequirements(spec.Resources, - c.makeDefaultConnectionPoolerResources()) - - // An error to generate expected resources means something is not quite - // right, but for the purpose of robustness do not panic here, just report - // and ignore resources comparison (in the worst case there will be no - // updates for new resource values). - if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { - sync = true - msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", - poolerContainer.Resources, expectedResources) - reasons = append(reasons, msg) - } - - if err != nil { - c.logger.Warningf("Cannot generate expected resources, %v", err) - } - - for _, env := range poolerContainer.Env { - if spec.User == "" && env.Name == "PGUSER" { - ref := env.ValueFrom.SecretKeyRef.LocalObjectReference - - if ref.Name != c.credentialSecretName(config.User) { - sync = true - msg := fmt.Sprintf("pooler user is different (having %s, required %s)", - ref.Name, config.User) - reasons = append(reasons, msg) - } - } - - if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { - sync = true - msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", - env.Value, config.Schema) - reasons = append(reasons, msg) - } - } - - return sync, reasons -} diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index c5bef5102..9acf39294 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -75,14 +75,6 @@ func (c *Cluster) statefulSetName() string { return c.Name } -func (c *Cluster) connectionPoolerName(role PostgresRole) string { - name := c.Name + "-pooler" - if role == Replica { - name = name + "-repl" - } - return name -} - func (c *Cluster) endpointName(role PostgresRole) string { name := c.Name if role == Replica { @@ -146,26 +138,6 @@ func (c *Cluster) 
makeDefaultResources() acidv1.Resources { } } -// Generate default resource section for connection pooler deployment, to be -// used if nothing custom is specified in the manifest -func (c *Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := c.OpConfig - - defaultRequests := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, - } - defaultLimits := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, - } - - return acidv1.Resources{ - ResourceRequests: defaultRequests, - ResourceLimits: defaultLimits, - } -} - func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) { var err error @@ -2068,179 +2040,6 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) { return "logical-backup-" + c.clusterName().Name } -// Generate pool size related environment variables. -// -// MAX_DB_CONN would specify the global maximum for connections to a target -// database. -// -// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. -// -// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when -// most of the queries coming through a connection pooler are from the same -// user to the same db). In case if we want to spin up more connection pooler -// instances, take this into account and maintain the same number of -// connections. -// -// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload -// have to wait for spinning up a new connections. -// -// RESERVE_SIZE is how many additional connections to allow for a pooler. 
-func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { - effectiveMode := util.Coalesce( - spec.ConnectionPooler.Mode, - c.OpConfig.ConnectionPooler.Mode) - - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - effectiveMaxDBConn := util.CoalesceInt32( - spec.ConnectionPooler.MaxDBConnections, - c.OpConfig.ConnectionPooler.MaxDBConnections) - - if effectiveMaxDBConn == nil { - effectiveMaxDBConn = k8sutil.Int32ToPointer( - constants.ConnectionPoolerMaxDBConnections) - } - - maxDBConn := *effectiveMaxDBConn / *numberOfInstances - - defaultSize := maxDBConn / 2 - minSize := defaultSize / 2 - reserveSize := minSize - - return []v1.EnvVar{ - { - Name: "CONNECTION_POOLER_PORT", - Value: fmt.Sprint(pgPort), - }, - { - Name: "CONNECTION_POOLER_MODE", - Value: effectiveMode, - }, - { - Name: "CONNECTION_POOLER_DEFAULT_SIZE", - Value: fmt.Sprint(defaultSize), - }, - { - Name: "CONNECTION_POOLER_MIN_SIZE", - Value: fmt.Sprint(minSize), - }, - { - Name: "CONNECTION_POOLER_RESERVE_SIZE", - Value: fmt.Sprint(reserveSize), - }, - { - Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", - Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), - }, - { - Name: "CONNECTION_POOLER_MAX_DB_CONN", - Value: fmt.Sprint(maxDBConn), - }, - } -} - -func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( - *v1.PodTemplateSpec, error) { - - gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) - resources, err := generateResourceRequirements( - spec.ConnectionPooler.Resources, - c.makeDefaultConnectionPoolerResources()) - - effectiveDockerImage := util.Coalesce( - spec.ConnectionPooler.DockerImage, - c.OpConfig.ConnectionPooler.Image) - - effectiveSchema := util.Coalesce( - spec.ConnectionPooler.Schema, - 
c.OpConfig.ConnectionPooler.Schema) - - if err != nil { - return nil, fmt.Errorf("could not generate resource requirements: %v", err) - } - - secretSelector := func(key string) *v1.SecretKeySelector { - effectiveUser := util.Coalesce( - spec.ConnectionPooler.User, - c.OpConfig.ConnectionPooler.User) - - return &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: c.credentialSecretName(effectiveUser), - }, - Key: key, - } - } - - envVars := []v1.EnvVar{ - { - Name: "PGHOST", - Value: c.serviceAddress(role), - }, - { - Name: "PGPORT", - Value: c.servicePort(role), - }, - { - Name: "PGUSER", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("username"), - }, - }, - // the convention is to use the same schema name as - // connection pooler username - { - Name: "PGSCHEMA", - Value: effectiveSchema, - }, - { - Name: "PGPASSWORD", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("password"), - }, - }, - } - - envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...) - - poolerContainer := v1.Container{ - Name: connectionPoolerContainer, - Image: effectiveDockerImage, - ImagePullPolicy: v1.PullIfNotPresent, - Resources: *resources, - Ports: []v1.ContainerPort{ - { - ContainerPort: pgPort, - Protocol: v1.ProtocolTCP, - }, - }, - Env: envVars, - } - - podTemplate := &v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, - Namespace: c.Namespace, - Annotations: c.generatePodAnnotations(spec), - }, - Spec: v1.PodSpec{ - ServiceAccountName: c.OpConfig.PodServiceAccountName, - TerminationGracePeriodSeconds: &gracePeriod, - Containers: []v1.Container{poolerContainer}, - // TODO: add tolerations to scheduler pooler on the same node - // as database - //Tolerations: *tolerationsSpec, - }, - } - - return podTemplate, nil -} - // Return an array of ownerReferences to make an arbitraty object dependent on // the StatefulSet. 
Dependency is made on StatefulSet instead of PostgreSQL CRD // while the former is represent the actual state, and only it's deletion means diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 49408fbf7..0738f2b77 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -128,8 +128,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { } // sync connection pooler - if _, err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { - return fmt.Errorf("could not sync connection pooler: %v", err) + for _, role := range c.RolesConnectionPooler() { + if _, err = c.ConnectionPooler[role].syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { + return fmt.Errorf("could not sync connection pooler: %v", err) + } } return err diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index f00086e50..d5b9bfb67 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -415,30 +415,6 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector { } } -// Return connection pooler labels selector, which should from one point of view -// inherit most of the labels from the cluster itself, but at the same time -// have e.g. different `application` label, so that recreatePod operation will -// not interfere with it (it lists all the pods via labels, and if there would -// be no difference, it will recreate also pooler pods). 
-func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { - connectionPoolerLabels := labels.Set(map[string]string{}) - - extraLabels := labels.Set(map[string]string{ - "connection-pooler-name": c.connectionPoolerName(role), - "application": "db-connection-pooler", - "role": string(role), - "cluster-name": c.ClusterName, - }) - - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels) - - return &metav1.LabelSelector{ - MatchLabels: connectionPoolerLabels, - MatchExpressions: nil, - } -} - func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) labels.Set { lbls := c.labelsSet(shouldAddExtraLabels) lbls[c.OpConfig.PodRoleLabel] = string(role) @@ -521,40 +497,6 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { return c.OpConfig.KubernetesUseConfigMaps } -// isConnectionPoolerEnabled -func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) -} - -func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler -} - -func (c *Cluster) needMasterConnectionPooler() bool { - return c.needMasterConnectionPoolerWorker(&c.Spec) -} - -func (c *Cluster) needConnectionPooler() bool { - return c.needMasterConnectionPoolerWorker(&c.Spec) || c.needReplicaConnectionPoolerWorker(&c.Spec) -} - -// RolesConnectionPooler gives the list of roles which need connection pooler -func (c *Cluster) RolesConnectionPooler() []PostgresRole { - roles := make([]PostgresRole, 2) - - if c.needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, Master) - } - if c.needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, 
Replica) - } - return roles -} - -func (c *Cluster) needReplicaConnectionPooler() bool { - return c.needReplicaConnectionPoolerWorker(&c.Spec) -} - // Earlier arguments take priority func mergeContainers(containers ...[]v1.Container) ([]v1.Container, []string) { containerNameTaken := map[string]bool{} diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go index 8a218ce7f..cf24f808a 100644 --- a/pkg/connection_pooler/connection_pooler.go +++ b/pkg/connection_pooler/connection_pooler.go @@ -4,25 +4,35 @@ import ( "context" "fmt" + "github.com/r3labs/diff" + "github.com/sirupsen/logrus" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/pooler_interface" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "github.com/r3labs/diff" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/cluster" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" ) +const ( + connectionPoolerContainer = "connection-pooler" + pgPort = 5432 +) + // K8S objects that are belongs to a connection pooler type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - Name string + Deployment *appsv1.Deployment + Service *v1.Service + Name string + ClusterName string + Namespace string + logger *logrus.Entry // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. 
 	// In this case we will miss missing/require situation and a lookup function
@@ -32,6 +42,92 @@ type ConnectionPoolerObjects struct {
 	LookupFunction bool
 }
 
+type SyncReason []string
+
+// no sync happened, empty value
+var NoSync SyncReason = []string{}
+
+// PostgresRole describes role of the node
+type PostgresRole string
+
+const (
+	// Master role
+	Master PostgresRole = "master"
+
+	// Replica role
+	Replica PostgresRole = "replica"
+)
+
+type InstallFunction func(schema string, user string, role PostgresRole) error
+
+func (cp *ConnectionPoolerObjects) connectionPoolerName(role PostgresRole) string {
+	name := cp.ClusterName + "-pooler"
+	if role == Replica {
+		name = name + "-repl"
+	}
+	return name
+}
+
+// isConnectionPoolerEnabled
+func (cp *ConnectionPoolerObjects) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
+	return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
+}
+
+func (cp *ConnectionPoolerObjects) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
+	return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler
+}
+
+//TODO: use spec from cluster
+func (cp *ConnectionPoolerObjects) needMasterConnectionPooler() bool {
+	return cp.needMasterConnectionPoolerWorker(&c.Spec)
+}
+
+func (cp *ConnectionPoolerObjects) needConnectionPooler() bool {
+	return cp.needMasterConnectionPoolerWorker(&c.Spec) || cp.needReplicaConnectionPoolerWorker(&c.Spec)
+}
+
+// RolesConnectionPooler gives the list of roles which need connection pooler
+func (cp *ConnectionPoolerObjects) RolesConnectionPooler() []PostgresRole {
+	roles := make([]PostgresRole, 0, 2)
+
+	if cp.needMasterConnectionPoolerWorker(&c.Spec) {
+		roles = append(roles, Master)
+	}
+	if cp.needReplicaConnectionPoolerWorker(&c.Spec) {
+		roles = append(roles, Replica)
+	}
+	return roles
+}
+
+func (cp *ConnectionPoolerObjects) needReplicaConnectionPooler()
bool { + return cp.needReplicaConnectionPoolerWorker(&c.Spec) +} + +// Return connection pooler labels selector, which should from one point of view +// inherit most of the labels from the cluster itself, but at the same time +// have e.g. different `application` label, so that recreatePod operation will +// not interfere with it (it lists all the pods via labels, and if there would +// be no difference, it will recreate also pooler pods). +func (cp *ConnectionPoolerObjects) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { + connectionPoolerLabels := labels.Set(map[string]string{}) + + extraLabels := labels.Set(map[string]string{ + "connection-pooler-name": cp.connectionPoolerName(role), + "application": "db-connection-pooler", + "role": string(role), + "cluster-name": cp.ClusterName, + }) + + connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) + connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels) + + return &metav1.LabelSelector{ + MatchLabels: connectionPoolerLabels, + MatchExpressions: nil, + } +} + +//TODO: how to use cluster type! // Prepare the database for connection pooler to be used, i.e. install lookup // function (do it first, because it should be fast and if it didn't succeed, // it doesn't makes sense to create more K8S objects. 
At this moment we assume @@ -43,7 +139,7 @@ type ConnectionPoolerObjects struct { // have connectionpooler name in the cp object to have it immutable name // add these cp related functions to a new cp file // opConfig, cluster, and database name -func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.InstallFunction, role cluster.PostgresRole, c cluster.Cluster) (*ConnectionPoolerObjects, error) { +func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { var msg string c.setProcessName("creating connection pooler") @@ -58,19 +154,19 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.Install user = c.OpConfig.ConnectionPooler.User } - err := c.lookup(schema, user, role) + err := lookup(schema, user, role) if err != nil { msg = "could not prepare database for connection pooler: %v" return nil, fmt.Errorf(msg, err) } if c.ConnectionPooler[role] == nil { - c.ConnectionPooler = make(map[c.PostgresRole]*ConnectionPoolerObjects) + c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) c.ConnectionPooler[role].Deployment = nil c.ConnectionPooler[role].Service = nil c.ConnectionPooler[role].LookupFunction = false } - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + deploymentSpec, err := c.ConnectionPooler[role].generateConnectionPoolerDeployment(&c.Spec, role) if err != nil { msg = "could not generate deployment for connection pooler: %v" return nil, fmt.Errorf(msg, err) @@ -87,7 +183,7 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.Install return nil, err } - serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) + serviceSpec := c.ConnectionPooler[role].generateConnectionPoolerService(&c.Spec, role) service, err := c.KubeClient. Services(serviceSpec.Namespace). 
Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) @@ -104,7 +200,184 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup cluster.Install return c.ConnectionPooler[role], nil } -func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) ( +//TODO: Figure out how can we go about for the opconfig required here! +// +// Generate pool size related environment variables. +// +// MAX_DB_CONN would specify the global maximum for connections to a target +// database. +// +// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. +// +// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when +// most of the queries coming through a connection pooler are from the same +// user to the same db). In case if we want to spin up more connection pooler +// instances, take this into account and maintain the same number of +// connections. +// +// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload +// have to wait for spinning up a new connections. +// +// RESERVE_SIZE is how many additional connections to allow for a pooler. 
+func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { + effectiveMode := util.Coalesce( + spec.ConnectionPooler.Mode, + c.OpConfig.ConnectionPooler.Mode) + + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + effectiveMaxDBConn := util.CoalesceInt32( + spec.ConnectionPooler.MaxDBConnections, + c.OpConfig.ConnectionPooler.MaxDBConnections) + + if effectiveMaxDBConn == nil { + effectiveMaxDBConn = k8sutil.Int32ToPointer( + constants.ConnectionPoolerMaxDBConnections) + } + + maxDBConn := *effectiveMaxDBConn / *numberOfInstances + + defaultSize := maxDBConn / 2 + minSize := defaultSize / 2 + reserveSize := minSize + + return []v1.EnvVar{ + { + Name: "CONNECTION_POOLER_PORT", + Value: fmt.Sprint(pgPort), + }, + { + Name: "CONNECTION_POOLER_MODE", + Value: effectiveMode, + }, + { + Name: "CONNECTION_POOLER_DEFAULT_SIZE", + Value: fmt.Sprint(defaultSize), + }, + { + Name: "CONNECTION_POOLER_MIN_SIZE", + Value: fmt.Sprint(minSize), + }, + { + Name: "CONNECTION_POOLER_RESERVE_SIZE", + Value: fmt.Sprint(reserveSize), + }, + { + Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", + Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), + }, + { + Name: "CONNECTION_POOLER_MAX_DB_CONN", + Value: fmt.Sprint(maxDBConn), + }, + } +} + +// TODO: Figure out how can we go about for the opconfig required here! 
+func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) (
+	*v1.PodTemplateSpec, error) {
+
+	gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
+	resources, err := pooler_interface.pooler.generateResourceRequirements(
+		spec.ConnectionPooler.Resources,
+		cp.makeDefaultConnectionPoolerResources())
+
+	effectiveDockerImage := util.Coalesce(
+		spec.ConnectionPooler.DockerImage,
+		c.OpConfig.ConnectionPooler.Image)
+
+	effectiveSchema := util.Coalesce(
+		spec.ConnectionPooler.Schema,
+		c.OpConfig.ConnectionPooler.Schema)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not generate resource requirements: %v", err)
+	}
+
+	secretSelector := func(key string) *v1.SecretKeySelector {
+		effectiveUser := util.Coalesce(
+			spec.ConnectionPooler.User,
+			c.OpConfig.ConnectionPooler.User)
+
+		return &v1.SecretKeySelector{
+			LocalObjectReference: v1.LocalObjectReference{
+				Name: pooler_interface.pooler.credentialSecretName(effectiveUser),
+			},
+			Key: key,
+		}
+	}
+
+	envVars := []v1.EnvVar{
+		{
+			Name:  "PGHOST",
+			Value: pooler_interface.pooler.serviceAddress(role),
+		},
+		{
+			Name:  "PGPORT",
+			Value: pooler_interface.pooler.servicePort(role),
+		},
+		{
+			Name: "PGUSER",
+			ValueFrom: &v1.EnvVarSource{
+				SecretKeyRef: secretSelector("username"),
+			},
+		},
+		// the convention is to use the same schema name as
+		// connection pooler username
+		{
+			Name:  "PGSCHEMA",
+			Value: effectiveSchema,
+		},
+		{
+			Name: "PGPASSWORD",
+			ValueFrom: &v1.EnvVarSource{
+				SecretKeyRef: secretSelector("password"),
+			},
+		},
+	}
+
+	envVars = append(envVars, cp.getConnectionPoolerEnvVars(spec)...)
+ + poolerContainer := v1.Container{ + Name: connectionPoolerContainer, + Image: effectiveDockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resources, + Ports: []v1.ContainerPort{ + { + ContainerPort: pgPort, + Protocol: v1.ProtocolTCP, + }, + }, + Env: envVars, + } + + podTemplate := &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, + Namespace: cp.Namespace, + Annotations: pooler_interface.pooler.pooler.generatePodAnnotations(spec), + }, + Spec: v1.PodSpec{ + ServiceAccountName: c.OpConfig.PodServiceAccountName, + TerminationGracePeriodSeconds: &gracePeriod, + Containers: []v1.Container{poolerContainer}, + // TODO: add tolerations to scheduler pooler on the same node + // as database + //Tolerations: *tolerationsSpec, + }, + } + + return podTemplate, nil +} + +//TODO: How to use opconfig from cluster type +func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( *appsv1.Deployment, error) { // there are two ways to enable connection pooler, either to specify a @@ -117,7 +390,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) + podTemplate, err := cp.generateConnectionPoolerPodTemplate(spec, role) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( @@ -127,7 +400,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid if *numberOfInstances < constants.ConnectionPoolerMinInstances { msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + cp.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) *numberOfInstances = 
constants.ConnectionPoolerMinInstances } @@ -138,9 +411,9 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Name: cp.connectionPoolerName(role), + Namespace: cp.Namespace, + Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -148,11 +421,11 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid // clean up this deployment, but there is a hope that this object // will be garbage collected if something went wrong and operator // didn't deleted it. - OwnerReferences: c.ownerReferences(), + OwnerReferences: pooler_interface.pooler.ownerReferences(), }, Spec: appsv1.DeploymentSpec{ Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(role), + Selector: cp.connectionPoolerLabelsSelector(role), Template: *podTemplate, }, } @@ -160,7 +433,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid return deployment, nil } -func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role cluster.PostgresRole, c cluster.Cluster) *v1.Service { +func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { // there are two ways to enable connection pooler, either to specify a // connectionPooler section or enableConnectionPooler. In the second case @@ -175,22 +448,22 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1. 
serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{ { - Name: c.connectionPoolerName(role), + Name: cp.connectionPoolerName(role), Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, + TargetPort: intstr.IntOrString{StrVal: pooler_interface.pooler.servicePort(role)}, }, }, Type: v1.ServiceTypeClusterIP, Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(role), + "connection-pooler": cp.connectionPoolerName(role), }, } service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Name: cp.connectionPoolerName(role), + Namespace: cp.Namespace, + Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -198,7 +471,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1. // clean up this service, but there is a hope that this object will // be garbage collected if something went wrong and operator didn't // deleted it. - OwnerReferences: c.ownerReferences(), + OwnerReferences: pooler_interface.pooler.ownerReferences(), }, Spec: serviceSpec, } @@ -206,22 +479,23 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1. 
return service } -// delete connection pooler -func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role cluster.PostgresRole, c cluster.Cluster) (err error) { - c.setProcessName("deleting connection pooler") - c.logger.Debugln("deleting connection pooler") +// TODO: how to use KubeClient, opconfig, deleteSecret, credentialSecretName from cluster package +//delete connection pooler +func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (err error) { + //c.setProcessName("deleting connection pooler") + cp.logger.Debugln("deleting connection pooler") // Lack of connection pooler objects is not a fatal error, just log it if // it was present before in the manifest - if c.ConnectionPooler == nil { - c.logger.Infof("No connection pooler to delete") + if cp == nil { + cp.logger.Infof("No connection pooler to delete") return nil } // Clean up the deployment object. If deployment resource we've remembered // is somehow empty, try to delete based on what would we generate var deployment *appsv1.Deployment - deployment = c.ConnectionPooler[role].Deployment + deployment = cp.Deployment policy := metav1.DeletePropagationForeground options := metav1.DeleteOptions{PropagationPolicy: &policy} @@ -232,64 +506,65 @@ func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role cluster.PostgresR // also deleted. err = c.KubeClient. - Deployments(c.Namespace). - Delete(context.TODO(), c.connectionPoolerName(role), options) + Deployments(cp.Namespace). 
+			Delete(context.TODO(), cp.connectionPoolerName(role), options)
 
 		if k8sutil.ResourceNotFound(err) {
-			c.logger.Debugf("Connection pooler deployment was already deleted")
+			cp.logger.Debugf("Connection pooler deployment was already deleted")
 		} else if err != nil {
 			return fmt.Errorf("could not delete deployment: %v", err)
 		}
 
-		c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role))
+		cp.logger.Infof("Connection pooler deployment %q has been deleted", cp.connectionPoolerName(role))
 	}
 
 	// Repeat the same for the service object
 	var service *v1.Service
-	service = c.ConnectionPooler[role].Service
+	service = cp.Service
 
 	if service != nil {
 		err = c.KubeClient.
-			Services(c.Namespace).
-			Delete(context.TODO(), c.connectionPoolerName(role), options)
+			Services(cp.Namespace).
+			Delete(context.TODO(), cp.connectionPoolerName(role), options)
 
 		if k8sutil.ResourceNotFound(err) {
-			c.logger.Debugf("Connection pooler service was already deleted")
+			cp.logger.Debugf("Connection pooler service was already deleted")
 		} else if err != nil {
 			return fmt.Errorf("could not delete service: %v", err)
 		}
 
-		c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role))
+		cp.logger.Infof("Connection pooler service %q has been deleted", cp.connectionPoolerName(role))
 	}
 
 	// Repeat the same for the secret object
-	secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User)
+	secretName := pooler_interface.pooler.credentialSecretName(c.OpConfig.ConnectionPooler.User)
 
 	secret, err := c.KubeClient.
-		Secrets(c.Namespace).
+		Secrets(cp.Namespace).
Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { - c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) + cp.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) } else { - if err = c.deleteSecret(secret.UID, *secret); err != nil { + if err = pooler_interface.pooler.deleteSecret(secret.UID, *secret); err != nil { return fmt.Errorf("could not delete pooler secret: %v", err) } } - c.ConnectionPooler = nil + cp = nil return nil } +//TODO: use KubeClient from cluster package // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. -func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { - c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler[role].Deployment == nil { +func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { + //c.setProcessName("updating connection pooler") + if cp == nil || cp.Deployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } - patchData, err := specPatch(newDeployment.Spec) + patchData, err := pooler_interface.pooler.specPatch(newDeployment.Spec) if err != nil { return nil, fmt.Errorf("could not form patch for the deployment: %v", err) } @@ -298,9 +573,9 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymen // worker at one time will try to update it chances of conflicts are // minimal. deployment, err := c.KubeClient. 
- Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + Deployments(cp.Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler[role].Deployment.Name, + cp.Deployment.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, @@ -309,21 +584,22 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymen return nil, fmt.Errorf("could not patch deployment: %v", err) } - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment return deployment, nil } +//TODO use Kubeclient //updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role cluster.PostgresRole, c cluster.Cluster) (*appsv1.Deployment, error) { - c.logger.Debugf("updating connection pooler annotations") - patchData, err := metaAnnotationsPatch(annotations) +func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { + cp.logger.Debugf("updating connection pooler annotations") + patchData, err := pooler_interface.pooler.metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) } - result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + result, err := c.KubeClient.Deployments(cp.Deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler[role].Deployment.Name, + cp.Deployment.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}, @@ -335,18 +611,16 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations } -//sync connection pooler - // Test if two connection pooler configuration needs to be synced. For simplicity // compare not the actual K8S objects, but the configuration itself and request // sync if there is any difference. 
-func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, c cluster.Cluster) (sync bool, reasons []string) { +func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { reasons = []string{} sync = false changelog, err := diff.Diff(oldSpec, newSpec) if err != nil { - c.logger.Infof("Cannot get diff, do not do anything, %+v", err) + cp.logger.Infof("Cannot get diff, do not do anything, %+v", err) return false, reasons } @@ -363,9 +637,10 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpe return sync, reasons } +//TODO use opConfig from cluster package // Check if we need to synchronize connection pooler deployment due to new // defaults, that are different from what we see in the DeploymentSpec -func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment, c cluster.Cluster) (sync bool, reasons []string) { +func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { reasons = []string{} sync = false @@ -396,14 +671,14 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 reasons = append(reasons, msg) } - expectedResources, err := generateResourceRequirements(spec.Resources, - c.makeDefaultConnectionPoolerResources()) + expectedResources, err := pooler_interface.pooler.generateResourceRequirements(spec.Resources, + cp.makeDefaultConnectionPoolerResources()) // An error to generate expected resources means something is not quite // right, but for the purpose of robustness do not panic here, just report // and ignore resources comparison (in the worst case there will be no // updates for new resource values). 
- if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + if err == nil && pooler_interface.pooler.syncResources(&poolerContainer.Resources, expectedResources) { sync = true msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", poolerContainer.Resources, expectedResources) @@ -411,14 +686,14 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 } if err != nil { - c.logger.Warningf("Cannot generate expected resources, %v", err) + cp.logger.Warningf("Cannot generate expected resources, %v", err) } for _, env := range poolerContainer.Env { if spec.User == "" && env.Name == "PGUSER" { ref := env.ValueFrom.SecretKeyRef.LocalObjectReference - if ref.Name != c.credentialSecretName(config.User) { + if ref.Name != pooler_interface.pooler.credentialSecretName(config.User) { sync = true msg := fmt.Sprintf("pooler user is different (having %s, required %s)", ref.Name, config.User) @@ -437,7 +712,29 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 return sync, reasons } -func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup cluster.InstallFunction, c cluster.Cluster) (SyncReason, error) { +//TODO use OpConfig from cluster package +// Generate default resource section for connection pooler deployment, to be +// used if nothing custom is specified in the manifest +func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1.Resources { + config := c.OpConfig + + defaultRequests := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, + } + defaultLimits := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, + } + + return acidv1.Resources{ + ResourceRequests: defaultRequests, + 
ResourceLimits: defaultLimits, + } +} + +//TODO use opConfig +func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) (SyncReason, error) { var reason SyncReason var err error @@ -445,18 +742,17 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 // Check and perform the sync requirements for each of the roles. for _, role := range [2]PostgresRole{Master, Replica} { - if role == Master { - newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + if role == cluster.Master { + newNeedConnectionPooler = cp.needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = cp.needMasterConnectionPoolerWorker(&oldSpec.Spec) } else { - newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + newNeedConnectionPooler = cp.needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = cp.needReplicaConnectionPoolerWorker(&oldSpec.Spec) } - if c.ConnectionPooler == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - c.ConnectionPooler[role].Deployment = nil - c.ConnectionPooler[role].Service = nil - c.ConnectionPooler[role].LookupFunction = false + if cp == nil { + cp.Deployment = nil + cp.Service = nil + cp.LookupFunction = false } if newNeedConnectionPooler { @@ -465,11 +761,11 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 // since it could happen that there is no difference in specs, and all // the resources are remembered, but the deployment was manually deleted // in between - c.logger.Debug("syncing connection pooler for the role %v", role) + cp.logger.Debug("syncing connection pooler for the role %v", role) // in this case also do not forget to install lookup function as for // creating cluster 
- if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { + if !oldNeedConnectionPooler || !cp.LookupFunction { newConnectionPooler := newSpec.Spec.ConnectionPooler specSchema := "" @@ -493,54 +789,36 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 } } - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) + if reason, err = cp.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { + cp.logger.Errorf("could not sync connection pooler: %v", err) return reason, err } } if oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { + if cp != nil && + (cp.Deployment != nil || + cp.Service != nil) { - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if err = cp.deleteConnectionPooler(role); err != nil { + cp.logger.Warningf("could not remove connection pooler: %v", err) } } - if c.ConnectionPooler != nil && c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil + if cp != nil && cp.Deployment == nil && cp.Service == nil { + cp = nil } } if !oldNeedConnectionPooler && !newNeedConnectionPooler { // delete and cleanup resources if not empty - otherRole := role - if len(c.RolesConnectionPooler()) == 2 { - if role == Master { - otherRole = Replica - } else { - otherRole = Master - } - } - if c.ConnectionPooler != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { + if cp != nil && + (cp.Deployment != nil || + cp.Service != nil) { - 
if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + if err = cp.deleteConnectionPooler(role); err != nil { + cp.logger.Warningf("could not remove connection pooler: %v", err) } - } else if c.ConnectionPooler[otherRole].Deployment == nil && c.ConnectionPooler[otherRole].Service == nil { - c.ConnectionPooler = nil } } } @@ -548,22 +826,23 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 return reason, nil } +//TODO use Kubeclient, AnnotationsToPropagate from cluster package // Synchronize connection pooler resources. Effectively we're interested only in // synchronizing the corresponding deployment, but in case of deployment or // service is missing, create it. After checking, also remember an object for // the future references. -func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role cluster.PostgresRole, c cluster.Cluster) ( +func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( SyncReason, error) { deployment, err := c.KubeClient. - Deployments(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + Deployments(cp.Namespace). 
+ Get(context.TODO(), cp.connectionPoolerName(role), metav1.GetOptions{}) if err != nil && k8sutil.ResourceNotFound(err) { msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) + cp.logger.Warningf(msg, cp.connectionPoolerName(role)) - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + deploymentSpec, err := cp.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { msg = "could not generate deployment for connection pooler: %v" return NoSync, fmt.Errorf(msg, err) @@ -576,12 +855,12 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * if err != nil { return NoSync, err } - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment } else if err != nil { msg := "could not get connection pooler deployment to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment // actual synchronization oldConnectionPooler := oldSpec.Spec.ConnectionPooler @@ -600,24 +879,24 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * newConnectionPooler = &acidv1.ConnectionPooler{} } - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + cp.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + specSync, specReason := cp.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + defaultsSync, defaultsReason := cp.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) 
if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), reason) - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + cp.logger.Infof("Update connection pooler deployment %s, reason: %+v", + cp.connectionPoolerName(role), reason) + newDeploymentSpec, err := cp.generateConnectionPoolerDeployment(&newSpec.Spec, role) if err != nil { msg := "could not generate deployment for connection pooler: %v" return reason, fmt.Errorf(msg, err) } - oldDeploymentSpec := c.ConnectionPooler[role].Deployment + oldDeploymentSpec := cp.Deployment - deployment, err := c.updateConnectionPoolerDeployment( + deployment, err := cp.updateConnectionPoolerDeployment( oldDeploymentSpec, newDeploymentSpec, role) @@ -625,26 +904,26 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * if err != nil { return reason, err } - c.ConnectionPooler[role].Deployment = deployment + cp.Deployment = deployment return reason, nil } } - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) + newAnnotations := pooler_interface.pooler.AnnotationsToPropagate(cp.Deployment.Annotations) if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations, role) + cp.updateConnectionPoolerAnnotations(newAnnotations, role) } service, err := c.KubeClient. - Services(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + Services(cp.Namespace). 
+ Get(context.TODO(), cp.connectionPoolerName(role), metav1.GetOptions{}) if err != nil && k8sutil.ResourceNotFound(err) { msg := "Service %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName(role)) + cp.logger.Warningf(msg, cp.connectionPoolerName(role)) - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) + serviceSpec := cp.generateConnectionPoolerService(&newSpec.Spec, role) service, err := c.KubeClient. Services(serviceSpec.Namespace). Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) @@ -652,14 +931,14 @@ func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec * if err != nil { return NoSync, err } - c.ConnectionPooler[role].Service = service + cp.Service = service } else if err != nil { msg := "could not get connection pooler service to sync: %v" return NoSync, fmt.Errorf(msg, err) } else { // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler[role].Service = service + cp.Service = service } return NoSync, nil diff --git a/pkg/pooler_interface/pooler_interface.go b/pkg/pooler_interface/pooler_interface.go new file mode 100644 index 000000000..91f874778 --- /dev/null +++ b/pkg/pooler_interface/pooler_interface.go @@ -0,0 +1,18 @@ +package pooler_interface + +//functions of cluster package used in connection_pooler package +type pooler interface { + (c *Cluster) credentialSecretName(username string) string + (c *Cluster) serviceAddress(role PostgresRole) string + (c *Cluster) servicePort(role PostgresRole) string + generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) + (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string + (c *Cluster) ownerReferences() []metav1.OwnerReference + (c *Cluster) credentialSecretName(username string) + (c *Cluster) deleteSecrets() error + specPatch(spec interface{}) ([]byte, error) 
+ metaAnnotationsPatch(annotations map[string]string) ([]byte, error) + generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) + syncResources(a, b *v1.ResourceRequirements) bool + (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string +} From 4fc7e8dc04d9ebabd296ddd5c91336f9c5b25f90 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 7 Oct 2020 10:20:15 +0200 Subject: [PATCH 25/40] place config in a common package --- pkg/cluster/cluster.go | 18 ++------- pkg/connection_pooler/connection_pooler.go | 43 +++++++++------------- pkg/resources/resources.go | 20 ++++++++++ 3 files changed, 41 insertions(+), 40 deletions(-) create mode 100644 pkg/resources/resources.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 39434b1b6..3c1f15feb 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -18,7 +18,6 @@ import ( policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/tools/reference" @@ -27,16 +26,14 @@ import ( "github.com/zalando/postgres-operator/pkg/connection_pooler" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" + "github.com/zalando/postgres-operator/pkg/resources" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" - "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" - - rbacv1 "k8s.io/api/rbac/v1" ) var ( @@ -46,15 +43,6 @@ var ( patroniObjectSuffixes = []string{"config", "failover", "sync"} ) -// Config 
contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. -type Config struct { - OpConfig config.Config - RestConfig *rest.Config - InfrastructureRoles map[string]spec.PgUser // inherited from the controller - PodServiceAccount *v1.ServiceAccount - PodServiceAccountRoleBinding *rbacv1.RoleBinding -} - type kubeResources struct { Services map[PostgresRole]*v1.Service Endpoints map[PostgresRole]*v1.Endpoints @@ -69,7 +57,7 @@ type kubeResources struct { type Cluster struct { kubeResources acidv1.Postgresql - Config + resources.Config logger *logrus.Entry eventRecorder record.EventRecorder patroni patroni.Interface @@ -99,7 +87,7 @@ type compareStatefulsetResult struct { } // New creates a new cluster. This function should be called from a controller. -func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { +func New(cfg resources.Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { deletePropagationPolicy := metav1.DeletePropagationOrphan podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) { diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go index cf24f808a..6ff7b4c61 100644 --- a/pkg/connection_pooler/connection_pooler.go +++ b/pkg/connection_pooler/connection_pooler.go @@ -7,7 +7,7 @@ import ( "github.com/r3labs/diff" "github.com/sirupsen/logrus" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/pooler_interface" + "github.com/zalando/postgres-operator/pkg/cluster" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + 
"github.com/zalando/postgres-operator/pkg/resources" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" @@ -146,12 +147,12 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction schema := c.Spec.ConnectionPooler.Schema if schema == "" { - schema = c.OpConfig.ConnectionPooler.Schema + schema = resources.OpConfig.ConnectionPooler.Schema } user := c.Spec.ConnectionPooler.User if user == "" { - user = c.OpConfig.ConnectionPooler.User + user = resources.OpConfig.ConnectionPooler.User } err := lookup(schema, user, role) @@ -200,7 +201,6 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction return c.ConnectionPooler[role], nil } -//TODO: Figure out how can we go about for the opconfig required here! // // Generate pool size related environment variables. // @@ -222,18 +222,18 @@ func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { effectiveMode := util.Coalesce( spec.ConnectionPooler.Mode, - c.OpConfig.ConnectionPooler.Mode) + resources.OpConfig.ConnectionPooler.Mode) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, + resources.OpConfig.ConnectionPooler.NumberOfInstances, k8sutil.Int32ToPointer(1)) } effectiveMaxDBConn := util.CoalesceInt32( spec.ConnectionPooler.MaxDBConnections, - c.OpConfig.ConnectionPooler.MaxDBConnections) + resources.OpConfig.ConnectionPooler.MaxDBConnections) if effectiveMaxDBConn == nil { effectiveMaxDBConn = k8sutil.Int32ToPointer( @@ -278,22 +278,21 @@ func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.Postg } } -// TODO: Figure out how can we go about for the opconfig required here! 
func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( *v1.PodTemplateSpec, error) { - gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) + gracePeriod := int64(resources.OpConfig.PodTerminateGracePeriod.Seconds()) resources, err := pooler_interface.pooler.pooler.generateResourceRequirements( spec.ConnectionPooler.Resources, cp.makeDefaultConnectionPoolerResources()) effectiveDockerImage := util.Coalesce( spec.ConnectionPooler.DockerImage, - c.OpConfig.ConnectionPooler.Image) + resources.OpConfig.ConnectionPooler.Image) effectiveSchema := util.Coalesce( spec.ConnectionPooler.Schema, - c.OpConfig.ConnectionPooler.Schema) + resources.OpConfig.ConnectionPooler.Schema) if err != nil { return nil, fmt.Errorf("could not generate resource requirements: %v", err) @@ -302,7 +301,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *aci secretSelector := func(key string) *v1.SecretKeySelector { effectiveUser := util.Coalesce( spec.ConnectionPooler.User, - c.OpConfig.ConnectionPooler.User) + resources.OpConfig.ConnectionPooler.User) return &v1.SecretKeySelector{ LocalObjectReference: v1.LocalObjectReference{ @@ -364,7 +363,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *aci Annotations: pooler_interface.pooler.pooler.generatePodAnnotations(spec), }, Spec: v1.PodSpec{ - ServiceAccountName: c.OpConfig.PodServiceAccountName, + ServiceAccountName: resources.OpConfig.PodServiceAccountName, TerminationGracePeriodSeconds: &gracePeriod, Containers: []v1.Container{poolerContainer}, // TODO: add tolerations to scheduler pooler on the same node @@ -376,7 +375,6 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *aci return podTemplate, nil } -//TODO: How to use opconfig from cluster type func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( 
*appsv1.Deployment, error) { @@ -394,7 +392,7 @@ func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acid numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, + resources.OpConfig.ConnectionPooler.NumberOfInstances, k8sutil.Int32ToPointer(1)) } @@ -537,7 +535,7 @@ func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (er cp.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) } // Repeat the same for the secret object - secretName := pooler_interface.pooler.credentialSecretName(c.OpConfig.ConnectionPooler.User) + secretName := pooler_interface.pooler.credentialSecretName(resources.OpConfig.ConnectionPooler.User) secret, err := c.KubeClient. Secrets(cp.Namespace). @@ -555,7 +553,6 @@ func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (er return nil } -//TODO: use KubeClient from cluster package // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. 
func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { @@ -589,7 +586,6 @@ func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymen return deployment, nil } -//TODO use Kubeclient //updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { cp.logger.Debugf("updating connection pooler annotations") @@ -637,7 +633,6 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpe return sync, reasons } -//TODO use opConfig from cluster package // Check if we need to synchronize connection pooler deployment due to new // defaults, that are different from what we see in the DeploymentSpec func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { @@ -645,7 +640,7 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 reasons = []string{} sync = false - config := c.OpConfig.ConnectionPooler + config := resources.OpConfig.ConnectionPooler podTemplate := deployment.Spec.Template poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] @@ -712,11 +707,10 @@ func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1 return sync, reasons } -//TODO use OpConfig from cluster package // Generate default resource section for connection pooler deployment, to be // used if nothing custom is specified in the manifest func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := c.OpConfig + config := resources.OpConfig defaultRequests := acidv1.ResourceDescription{ CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, @@ 
-733,7 +727,6 @@ func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1. } } -//TODO use opConfig func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) (SyncReason, error) { var reason SyncReason @@ -778,11 +771,11 @@ func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1 schema := util.Coalesce( specSchema, - c.OpConfig.ConnectionPooler.Schema) + resources.OpConfig.ConnectionPooler.Schema) user := util.Coalesce( specUser, - c.OpConfig.ConnectionPooler.User) + resources.OpConfig.ConnectionPooler.User) if err = lookup(schema, user, role); err != nil { return NoSync, err diff --git a/pkg/resources/resources.go b/pkg/resources/resources.go new file mode 100644 index 000000000..fc43018c4 --- /dev/null +++ b/pkg/resources/resources.go @@ -0,0 +1,20 @@ +package resources + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + + "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/util/config" + + rbacv1 "k8s.io/api/rbac/v1" +) + +// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. +type Config struct { + OpConfig config.Config + RestConfig *rest.Config + InfrastructureRoles map[string]spec.PgUser // inherited from the controller + PodServiceAccount *v1.ServiceAccount + PodServiceAccountRoleBinding *rbacv1.RoleBinding +} From f7098b8562da9e1b399b861a2e982496c13922cd Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 19 Oct 2020 09:37:35 +0200 Subject: [PATCH 26/40] Modify packages Keep connectionPooler in cluster package itself. This helps in getting rid of previously added packages and interfaces. Based on the new structure of ConnectionPooler in cluster, there are functional changes to come. This commit is only for updating and cleaning of packages. 
--- pkg/cluster/cluster.go | 34 +- pkg/cluster/sync.go | 6 +- pkg/connection_pooler/connection_pooler.go | 938 --------------------- pkg/pooler_interface/pooler_interface.go | 18 - pkg/resources/resources.go | 20 - 5 files changed, 23 insertions(+), 993 deletions(-) delete mode 100644 pkg/connection_pooler/connection_pooler.go delete mode 100644 pkg/pooler_interface/pooler_interface.go delete mode 100644 pkg/resources/resources.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 3c1f15feb..ca0d1ffb0 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -18,22 +18,23 @@ import ( policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/tools/reference" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/connection_pooler" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" - "github.com/zalando/postgres-operator/pkg/resources" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" + rbacv1 "k8s.io/api/rbac/v1" ) var ( @@ -43,6 +44,15 @@ var ( patroniObjectSuffixes = []string{"config", "failover", "sync"} ) +// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. 
+type Config struct { + OpConfig config.Config + RestConfig *rest.Config + InfrastructureRoles map[string]spec.PgUser // inherited from the controller + PodServiceAccount *v1.ServiceAccount + PodServiceAccountRoleBinding *rbacv1.RoleBinding +} + type kubeResources struct { Services map[PostgresRole]*v1.Service Endpoints map[PostgresRole]*v1.Endpoints @@ -57,7 +67,7 @@ type kubeResources struct { type Cluster struct { kubeResources acidv1.Postgresql - resources.Config + Config logger *logrus.Entry eventRecorder record.EventRecorder patroni patroni.Interface @@ -77,7 +87,7 @@ type Cluster struct { currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex - ConnectionPooler map[PostgresRole]*connection_pooler.ConnectionPoolerObjects + ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects } type compareStatefulsetResult struct { match bool @@ -87,7 +97,7 @@ type compareStatefulsetResult struct { } // New creates a new cluster. This function should be called from a controller. 
-func New(cfg resources.Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { +func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { deletePropagationPolicy := metav1.DeletePropagationOrphan podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) { @@ -332,7 +342,7 @@ func (c *Cluster) Create() error { c.logger.Warning("Connection pooler already exists in the cluster") return nil } - connectionPooler, err := c.ConnectionPooler[r].createConnectionPooler(c.installLookupFunction, r) + connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) if err != nil { c.logger.Warningf("could not create connection pooler: %v", err) return nil @@ -762,12 +772,10 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } // sync connection pooler - for _, role := range c.RolesConnectionPooler() { - if _, err := c.ConnectionPooler[role].syncConnectionPooler(oldSpec, newSpec, - c.installLookupFunction); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - updateFailed = true - } + if _, err := c.syncConnectionPooler(oldSpec, newSpec, + c.installLookupFunction); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + updateFailed = true } return nil @@ -836,7 +844,7 @@ func (c *Cluster) Delete() { // manifest, just to not keep orphaned components in case if something went // wrong for _, role := range [2]PostgresRole{Master, Replica} { - if err := c.ConnectionPooler[role].deleteConnectionPooler(role); err != nil { + if err := c.deleteConnectionPooler(role); err != nil { c.logger.Warningf("could not remove connection pooler: %v", err) } } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 0738f2b77..49408fbf7 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -128,10 +128,8 @@ func (c 
*Cluster) Sync(newSpec *acidv1.Postgresql) error { } // sync connection pooler - for _, role := range c.RolesConnectionPooler() { - if _, err = c.ConnectionPooler[role].syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { - return fmt.Errorf("could not sync connection pooler: %v", err) - } + if _, err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { + return fmt.Errorf("could not sync connection pooler: %v", err) } return err diff --git a/pkg/connection_pooler/connection_pooler.go b/pkg/connection_pooler/connection_pooler.go deleted file mode 100644 index 6ff7b4c61..000000000 --- a/pkg/connection_pooler/connection_pooler.go +++ /dev/null @@ -1,938 +0,0 @@ -package connection_pooler - -import ( - "context" - "fmt" - - "github.com/r3labs/diff" - "github.com/sirupsen/logrus" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/cluster" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/zalando/postgres-operator/pkg/resources" - "github.com/zalando/postgres-operator/pkg/util" - "github.com/zalando/postgres-operator/pkg/util/constants" - "github.com/zalando/postgres-operator/pkg/util/k8sutil" -) - -const ( - connectionPoolerContainer = "connection-pooler" - pgPort = 5432 -) - -// K8S objects that are belongs to a connection pooler -type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - Name string - ClusterName string - Namespace string - logger *logrus.Entry - // It could happen that a connection pooler was enabled, but the operator - // was not able to properly process a corresponding event or was restarted. - // In this case we will miss missing/require situation and a lookup function - // will not be installed. 
To avoid synchronizing it all the time to prevent - // this, we can remember the result in memory at least until the next - // restart. - LookupFunction bool -} - -type SyncReason []string - -// no sync happened, empty value -var NoSync SyncReason = []string{} - -// PostgresRole describes role of the node -type PostgresRole string - -const ( - // Master role - Master PostgresRole = "master" - - // Replica role - Replica PostgresRole = "replica" -) - -type InstallFunction func(schema string, user string, role PostgresRole) error - -func (cp *ConnectionPoolerObjects) connectionPoolerName(role PostgresRole) string { - name := cp.ClusterName + "-pooler" - if role == Replica { - name = name + "-repl" - } - return name -} - -// isConnectionPoolerEnabled -func (cp *ConnectionPoolerObjects) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) -} - -func (cp *ConnectionPoolerObjects) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler -} - -//TODO: use spec from cluster -func (cp *ConnectionPoolerObjects) needMasterConnectionPooler() bool { - return cp.needMasterConnectionPoolerWorker(&c.Spec) -} - -func (cp *ConnectionPoolerObjects) needConnectionPooler() bool { - return cp.needMasterConnectionPoolerWorker(&c.Spec) || cp.needReplicaConnectionPoolerWorker(&c.Spec) -} - -// RolesConnectionPooler gives the list of roles which need connection pooler -func (cp *ConnectionPoolerObjects) RolesConnectionPooler() []PostgresRole { - roles := make([]PostgresRole, 2) - - if c.needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, Master) - } - if c.needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, Replica) - } - return roles -} - -func (cp *ConnectionPoolerObjects) 
needReplicaConnectionPooler() bool { - return cp.needReplicaConnectionPoolerWorker(&c.Spec) -} - -// Return connection pooler labels selector, which should from one point of view -// inherit most of the labels from the cluster itself, but at the same time -// have e.g. different `application` label, so that recreatePod operation will -// not interfere with it (it lists all the pods via labels, and if there would -// be no difference, it will recreate also pooler pods). -func (cp *ConnectionPoolerObjects) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { - connectionPoolerLabels := labels.Set(map[string]string{}) - - extraLabels := labels.Set(map[string]string{ - "connection-pooler-name": cp.connectionPoolerName(role), - "application": "db-connection-pooler", - "role": string(role), - "cluster-name": cp.ClusterName, - }) - - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels) - - return &metav1.LabelSelector{ - MatchLabels: connectionPoolerLabels, - MatchExpressions: nil, - } -} - -//TODO: how to use cluster type! -// Prepare the database for connection pooler to be used, i.e. install lookup -// function (do it first, because it should be fast and if it didn't succeed, -// it doesn't makes sense to create more K8S objects. At this moment we assume -// that necessary connection pooler user exists. -// -// After that create all the objects for connection pooler, namely a deployment -// with a chosen pooler and a service to expose it. 
- -// have connectionpooler name in the cp object to have it immutable name -// add these cp related functions to a new cp file -// opConfig, cluster, and database name -func (cp *ConnectionPoolerObjects) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { - var msg string - c.setProcessName("creating connection pooler") - - schema := c.Spec.ConnectionPooler.Schema - - if schema == "" { - schema = resources.OpConfig.ConnectionPooler.Schema - } - - user := c.Spec.ConnectionPooler.User - if user == "" { - user = resources.OpConfig.ConnectionPooler.User - } - - err := lookup(schema, user, role) - - if err != nil { - msg = "could not prepare database for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - if c.ConnectionPooler[role] == nil { - c.ConnectionPooler = make(map[PostgresRole]*ConnectionPoolerObjects) - c.ConnectionPooler[role].Deployment = nil - c.ConnectionPooler[role].Service = nil - c.ConnectionPooler[role].LookupFunction = false - } - deploymentSpec, err := c.ConnectionPooler[role].generateConnectionPoolerDeployment(&c.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - serviceSpec := c.ConnectionPooler[role].generateConnectionPoolerService(&c.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - c.ConnectionPooler[role].Deployment = deployment - c.ConnectionPooler[role].Service = service - - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - - return c.ConnectionPooler[role], nil -} - -// -// Generate pool size related environment variables. -// -// MAX_DB_CONN would specify the global maximum for connections to a target -// database. -// -// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. -// -// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when -// most of the queries coming through a connection pooler are from the same -// user to the same db). In case if we want to spin up more connection pooler -// instances, take this into account and maintain the same number of -// connections. -// -// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload -// have to wait for spinning up a new connections. -// -// RESERVE_SIZE is how many additional connections to allow for a pooler. 
-func (cp *ConnectionPoolerObjects) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { - effectiveMode := util.Coalesce( - spec.ConnectionPooler.Mode, - resources.OpConfig.ConnectionPooler.Mode) - - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - resources.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - effectiveMaxDBConn := util.CoalesceInt32( - spec.ConnectionPooler.MaxDBConnections, - resources.OpConfig.ConnectionPooler.MaxDBConnections) - - if effectiveMaxDBConn == nil { - effectiveMaxDBConn = k8sutil.Int32ToPointer( - constants.ConnectionPoolerMaxDBConnections) - } - - maxDBConn := *effectiveMaxDBConn / *numberOfInstances - - defaultSize := maxDBConn / 2 - minSize := defaultSize / 2 - reserveSize := minSize - - return []v1.EnvVar{ - { - Name: "CONNECTION_POOLER_PORT", - Value: fmt.Sprint(pgPort), - }, - { - Name: "CONNECTION_POOLER_MODE", - Value: effectiveMode, - }, - { - Name: "CONNECTION_POOLER_DEFAULT_SIZE", - Value: fmt.Sprint(defaultSize), - }, - { - Name: "CONNECTION_POOLER_MIN_SIZE", - Value: fmt.Sprint(minSize), - }, - { - Name: "CONNECTION_POOLER_RESERVE_SIZE", - Value: fmt.Sprint(reserveSize), - }, - { - Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", - Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), - }, - { - Name: "CONNECTION_POOLER_MAX_DB_CONN", - Value: fmt.Sprint(maxDBConn), - }, - } -} - -func (cp *ConnectionPoolerObjects) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( - *v1.PodTemplateSpec, error) { - - gracePeriod := int64(resources.OpConfig.PodTerminateGracePeriod.Seconds()) - resources, err := pooler_interface.pooler.pooler.generateResourceRequirements( - spec.ConnectionPooler.Resources, - cp.makeDefaultConnectionPoolerResources()) - - effectiveDockerImage := util.Coalesce( - spec.ConnectionPooler.DockerImage, - 
resources.OpConfig.ConnectionPooler.Image) - - effectiveSchema := util.Coalesce( - spec.ConnectionPooler.Schema, - resources.OpConfig.ConnectionPooler.Schema) - - if err != nil { - return nil, fmt.Errorf("could not generate resource requirements: %v", err) - } - - secretSelector := func(key string) *v1.SecretKeySelector { - effectiveUser := util.Coalesce( - spec.ConnectionPooler.User, - resources.OpConfig.ConnectionPooler.User) - - return &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: pooler_interface.pooler.pooler.credentialSecretName(effectiveUser), - }, - Key: key, - } - } - - envVars := []v1.EnvVar{ - { - Name: "PGHOST", - Value: pooler_interface.pooler.pooler.serviceAddress(role), - }, - { - Name: "PGPORT", - Value: pooler_interface.pooler.pooler.servicePort(role), - }, - { - Name: "PGUSER", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("username"), - }, - }, - // the convention is to use the same schema name as - // connection pooler username - { - Name: "PGSCHEMA", - Value: effectiveSchema, - }, - { - Name: "PGPASSWORD", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("password"), - }, - }, - } - - envVars = append(envVars, cp.getConnectionPoolerEnvVars(spec)...) 
- - poolerContainer := v1.Container{ - Name: connectionPoolerContainer, - Image: effectiveDockerImage, - ImagePullPolicy: v1.PullIfNotPresent, - Resources: *resources, - Ports: []v1.ContainerPort{ - { - ContainerPort: pgPort, - Protocol: v1.ProtocolTCP, - }, - }, - Env: envVars, - } - - podTemplate := &v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, - Namespace: cp.Namespace, - Annotations: pooler_interface.pooler.pooler.generatePodAnnotations(spec), - }, - Spec: v1.PodSpec{ - ServiceAccountName: resources.OpConfig.PodServiceAccountName, - TerminationGracePeriodSeconds: &gracePeriod, - Containers: []v1.Container{poolerContainer}, - // TODO: add tolerations to scheduler pooler on the same node - // as database - //Tolerations: *tolerationsSpec, - }, - } - - return podTemplate, nil -} - -func (cp *ConnectionPoolerObjects) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( - *appsv1.Deployment, error) { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. 
- if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - podTemplate, err := cp.generateConnectionPoolerPodTemplate(spec, role) - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - resources.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - if *numberOfInstances < constants.ConnectionPoolerMinInstances { - msg := "Adjusted number of connection pooler instances from %d to %d" - cp.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) - - *numberOfInstances = constants.ConnectionPoolerMinInstances - } - - if err != nil { - return nil, err - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: cp.connectionPoolerName(role), - Namespace: cp.Namespace, - Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this deployment, but there is a hope that this object - // will be garbage collected if something went wrong and operator - // didn't deleted it. - OwnerReferences: pooler_interface.pooler.ownerReferences(), - }, - Spec: appsv1.DeploymentSpec{ - Replicas: numberOfInstances, - Selector: cp.connectionPoolerLabelsSelector(role), - Template: *podTemplate, - }, - } - - return deployment, nil -} - -func (cp *ConnectionPoolerObjects) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. 
It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. - if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - serviceSpec := v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: cp.connectionPoolerName(role), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: pooler_interface.pooler.servicePort(role)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": cp.connectionPoolerName(role), - }, - } - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: cp.connectionPoolerName(role), - Namespace: cp.Namespace, - Labels: cp.connectionPoolerLabelsSelector(role).MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this service, but there is a hope that this object will - // be garbage collected if something went wrong and operator didn't - // deleted it. - OwnerReferences: pooler_interface.pooler.ownerReferences(), - }, - Spec: serviceSpec, - } - - return service -} - -// TODO: how to use KubeClient, opconfig, deleteSecret, credentialSecretName from cluster package -//delete connection pooler -func (cp *ConnectionPoolerObjects) deleteConnectionPooler(role PostgresRole) (err error) { - //c.setProcessName("deleting connection pooler") - cp.logger.Debugln("deleting connection pooler") - - // Lack of connection pooler objects is not a fatal error, just log it if - // it was present before in the manifest - if cp == nil { - cp.logger.Infof("No connection pooler to delete") - return nil - } - - // Clean up the deployment object. 
If deployment resource we've remembered - // is somehow empty, try to delete based on what would we generate - var deployment *appsv1.Deployment - deployment = cp.Deployment - - policy := metav1.DeletePropagationForeground - options := metav1.DeleteOptions{PropagationPolicy: &policy} - - if deployment != nil { - - // set delete propagation policy to foreground, so that replica set will be - // also deleted. - - err = c.KubeClient. - Deployments(cp.Namespace). - Delete(context.TODO(), cp.connectionPoolerName(role), options) - - if k8sutil.ResourceNotFound(err) { - cp.logger.Debugf("Connection pooler deployment was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete deployment: %v", err) - } - - cp.logger.Infof("Connection pooler deployment %q has been deleted", cp.connectionPoolerName(role)) - } - - // Repeat the same for the service object - var service *v1.Service - service = cp.Service - - if service != nil { - - err = c.KubeClient. - Services(cp.Namespace). - Delete(context.TODO(), cp.connectionPoolerName(role), options) - - if k8sutil.ResourceNotFound(err) { - cp.logger.Debugf("Connection pooler service was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete service: %v", err) - } - - cp.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) - } - // Repeat the same for the secret object - secretName := pooler_interface.pooler.credentialSecretName(resources.OpConfig.ConnectionPooler.User) - - secret, err := c.KubeClient. - Secrets(cp.Namespace). 
- Get(context.TODO(), secretName, metav1.GetOptions{}) - - if err != nil { - cp.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) - } else { - if err = pooler_interface.pooler.deleteSecret(secret.UID, *secret); err != nil { - return fmt.Errorf("could not delete pooler secret: %v", err) - } - } - - cp = nil - return nil -} - -// Perform actual patching of a connection pooler deployment, assuming that all -// the check were already done before. -func (cp *ConnectionPoolerObjects) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { - //c.setProcessName("updating connection pooler") - if cp == nil || cp.Deployment == nil { - return nil, fmt.Errorf("there is no connection pooler in the cluster") - } - - patchData, err := pooler_interface.pooler.specPatch(newDeployment.Spec) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment: %v", err) - } - - // An update probably requires RetryOnConflict, but since only one operator - // worker at one time will try to update it chances of conflicts are - // minimal. - deployment, err := c.KubeClient. 
- Deployments(cp.Deployment.Namespace).Patch( - context.TODO(), - cp.Deployment.Name, - types.MergePatchType, - patchData, - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch deployment: %v", err) - } - - cp.Deployment = deployment - - return deployment, nil -} - -//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (cp *ConnectionPoolerObjects) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { - cp.logger.Debugf("updating connection pooler annotations") - patchData, err := pooler_interface.pooler.metaAnnotationsPatch(annotations) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) - } - result, err := c.KubeClient.Deployments(cp.Deployment.Namespace).Patch( - context.TODO(), - cp.Deployment.Name, - types.MergePatchType, - []byte(patchData), - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) - } - return result, nil - -} - -// Test if two connection pooler configuration needs to be synced. For simplicity -// compare not the actual K8S objects, but the configuration itself and request -// sync if there is any difference. 
-func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { - reasons = []string{} - sync = false - - changelog, err := diff.Diff(oldSpec, newSpec) - if err != nil { - cp.logger.Infof("Cannot get diff, do not do anything, %+v", err) - return false, reasons - } - - if len(changelog) > 0 { - sync = true - } - - for _, change := range changelog { - msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", - change.Type, change.Path, change.From, change.To) - reasons = append(reasons, msg) - } - - return sync, reasons -} - -// Check if we need to synchronize connection pooler deployment due to new -// defaults, that are different from what we see in the DeploymentSpec -func (cp *ConnectionPoolerObjects) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { - - reasons = []string{} - sync = false - - config := resources.OpConfig.ConnectionPooler - podTemplate := deployment.Spec.Template - poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] - - if spec == nil { - spec = &acidv1.ConnectionPooler{} - } - - if spec.NumberOfInstances == nil && - *deployment.Spec.Replicas != *config.NumberOfInstances { - - sync = true - msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", - *deployment.Spec.Replicas, *config.NumberOfInstances) - reasons = append(reasons, msg) - } - - if spec.DockerImage == "" && - poolerContainer.Image != config.Image { - - sync = true - msg := fmt.Sprintf("DockerImage is different (having %s, required %s)", - poolerContainer.Image, config.Image) - reasons = append(reasons, msg) - } - - expectedResources, err := pooler_interface.pooler.generateResourceRequirements(spec.Resources, - cp.makeDefaultConnectionPoolerResources()) - - // An error to generate expected resources means something is not quite - // right, but for the purpose of robustness do not panic 
here, just report - // and ignore resources comparison (in the worst case there will be no - // updates for new resource values). - if err == nil && pooler_interface.pooler.syncResources(&poolerContainer.Resources, expectedResources) { - sync = true - msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", - poolerContainer.Resources, expectedResources) - reasons = append(reasons, msg) - } - - if err != nil { - cp.logger.Warningf("Cannot generate expected resources, %v", err) - } - - for _, env := range poolerContainer.Env { - if spec.User == "" && env.Name == "PGUSER" { - ref := env.ValueFrom.SecretKeyRef.LocalObjectReference - - if ref.Name != pooler_interface.pooler.credentialSecretName(config.User) { - sync = true - msg := fmt.Sprintf("pooler user is different (having %s, required %s)", - ref.Name, config.User) - reasons = append(reasons, msg) - } - } - - if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { - sync = true - msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", - env.Value, config.Schema) - reasons = append(reasons, msg) - } - } - - return sync, reasons -} - -// Generate default resource section for connection pooler deployment, to be -// used if nothing custom is specified in the manifest -func (cp ConnectionPoolerObjects) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := resources.OpConfig - - defaultRequests := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, - } - defaultLimits := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, - } - - return acidv1.Resources{ - ResourceRequests: defaultRequests, - ResourceLimits: defaultLimits, - } -} - -func (cp *ConnectionPoolerObjects) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, 
lookup InstallFunction) (SyncReason, error) { - - var reason SyncReason - var err error - var newNeedConnectionPooler, oldNeedConnectionPooler bool - - // Check and perform the sync requirements for each of the roles. - for _, role := range [2]PostgresRole{Master, Replica} { - if role == cluster.Master { - newNeedConnectionPooler = cp.needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = cp.needMasterConnectionPoolerWorker(&oldSpec.Spec) - } else { - newNeedConnectionPooler = cp.needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = cp.needReplicaConnectionPoolerWorker(&oldSpec.Spec) - } - if cp == nil { - cp.Deployment = nil - cp.Service = nil - cp.LookupFunction = false - } - - if newNeedConnectionPooler { - // Try to sync in any case. If we didn't needed connection pooler before, - // it means we want to create it. If it was already present, still sync - // since it could happen that there is no difference in specs, and all - // the resources are remembered, but the deployment was manually deleted - // in between - cp.logger.Debug("syncing connection pooler for the role %v", role) - - // in this case also do not forget to install lookup function as for - // creating cluster - if !oldNeedConnectionPooler || !cp.LookupFunction { - newConnectionPooler := newSpec.Spec.ConnectionPooler - - specSchema := "" - specUser := "" - - if newConnectionPooler != nil { - specSchema = newConnectionPooler.Schema - specUser = newConnectionPooler.User - } - - schema := util.Coalesce( - specSchema, - resources.OpConfig.ConnectionPooler.Schema) - - user := util.Coalesce( - specUser, - resources.OpConfig.ConnectionPooler.User) - - if err = lookup(schema, user, role); err != nil { - return NoSync, err - } - } - - if reason, err = cp.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { - cp.logger.Errorf("could not sync connection pooler: %v", err) - return reason, err - } - } - - if oldNeedConnectionPooler && 
!newNeedConnectionPooler { - // delete and cleanup resources - if cp != nil && - (cp.Deployment != nil || - cp.Service != nil) { - - if err = cp.deleteConnectionPooler(role); err != nil { - cp.logger.Warningf("could not remove connection pooler: %v", err) - } - } - if cp != nil && cp.Deployment == nil && cp.Service == nil { - cp = nil - } - } - - if !oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources if not empty - if cp != nil && - (cp.Deployment != nil || - cp.Service != nil) { - - if err = cp.deleteConnectionPooler(role); err != nil { - cp.logger.Warningf("could not remove connection pooler: %v", err) - } - } - } - } - - return reason, nil -} - -//TODO use Kubeclient, AnnotationsToPropagate from cluster package -// Synchronize connection pooler resources. Effectively we're interested only in -// synchronizing the corresponding deployment, but in case of deployment or -// service is missing, create it. After checking, also remember an object for -// the future references. -func (cp *ConnectionPoolerObjects) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( - SyncReason, error) { - - deployment, err := c.KubeClient. - Deployments(cp.Namespace). - Get(context.TODO(), cp.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Deployment %s for connection pooler synchronization is not found, create it" - cp.logger.Warningf(msg, cp.connectionPoolerName(role)) - - deploymentSpec, err := cp.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return NoSync, fmt.Errorf(msg, err) - } - - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). 
- Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - cp.Deployment = deployment - } else if err != nil { - msg := "could not get connection pooler deployment to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - cp.Deployment = deployment - - // actual synchronization - oldConnectionPooler := oldSpec.Spec.ConnectionPooler - newConnectionPooler := newSpec.Spec.ConnectionPooler - - // sync implementation below assumes that both old and new specs are - // not nil, but it can happen. To avoid any confusion like updating a - // deployment because the specification changed from nil to an empty - // struct (that was initialized somewhere before) replace any nil with - // an empty spec. - if oldConnectionPooler == nil { - oldConnectionPooler = &acidv1.ConnectionPooler{} - } - - if newConnectionPooler == nil { - newConnectionPooler = &acidv1.ConnectionPooler{} - } - - cp.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - - specSync, specReason := cp.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := cp.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) - reason := append(specReason, defaultsReason...) 
- - if specSync || defaultsSync { - cp.logger.Infof("Update connection pooler deployment %s, reason: %+v", - cp.connectionPoolerName(role), reason) - newDeploymentSpec, err := cp.generateConnectionPoolerDeployment(&newSpec.Spec, role) - if err != nil { - msg := "could not generate deployment for connection pooler: %v" - return reason, fmt.Errorf(msg, err) - } - - oldDeploymentSpec := cp.Deployment - - deployment, err := cp.updateConnectionPoolerDeployment( - oldDeploymentSpec, - newDeploymentSpec, - role) - - if err != nil { - return reason, err - } - cp.Deployment = deployment - - return reason, nil - } - } - - newAnnotations := pooler_interface.pooler.AnnotationsToPropagate(cp.Deployment.Annotations) - if newAnnotations != nil { - cp.updateConnectionPoolerAnnotations(newAnnotations, role) - } - - service, err := c.KubeClient. - Services(cp.Namespace). - Get(context.TODO(), cp.connectionPoolerName(role), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Service %s for connection pooler synchronization is not found, create it" - cp.logger.Warningf(msg, cp.connectionPoolerName(role)) - - serviceSpec := cp.generateConnectionPoolerService(&newSpec.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - cp.Service = service - - } else if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - // Service updates are not supported and probably not that useful anyway - cp.Service = service - } - - return NoSync, nil -} diff --git a/pkg/pooler_interface/pooler_interface.go b/pkg/pooler_interface/pooler_interface.go deleted file mode 100644 index 91f874778..000000000 --- a/pkg/pooler_interface/pooler_interface.go +++ /dev/null @@ -1,18 +0,0 @@ -package pooler_interface - -//functions of cluster package used in connection_pooler package -type pooler interface { - (c *Cluster) credentialSecretName(username string) string - (c *Cluster) serviceAddress(role PostgresRole) string - (c *Cluster) servicePort(role PostgresRole) string - generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) - (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string - (c *Cluster) ownerReferences() []metav1.OwnerReference - (c *Cluster) credentialSecretName(username string) - (c *Cluster) deleteSecrets() error - specPatch(spec interface{}) ([]byte, error) - metaAnnotationsPatch(annotations map[string]string) ([]byte, error) - generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) - syncResources(a, b *v1.ResourceRequirements) bool - (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string -} diff --git a/pkg/resources/resources.go b/pkg/resources/resources.go deleted file mode 100644 index fc43018c4..000000000 --- a/pkg/resources/resources.go +++ /dev/null @@ -1,20 +0,0 @@ -package resources - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/rest" - - "github.com/zalando/postgres-operator/pkg/spec" - 
"github.com/zalando/postgres-operator/pkg/util/config" - - rbacv1 "k8s.io/api/rbac/v1" -) - -// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. -type Config struct { - OpConfig config.Config - RestConfig *rest.Config - InfrastructureRoles map[string]spec.PgUser // inherited from the controller - PodServiceAccount *v1.ServiceAccount - PodServiceAccountRoleBinding *rbacv1.RoleBinding -} From 20f2fb7c60ffc523472a864aa22497912181fc65 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Mon, 19 Oct 2020 12:41:30 +0200 Subject: [PATCH 27/40] Update connection_pooler funcs accordingly --- pkg/cluster/connection_pooler.go | 930 +++++++++++++++++++++++++++++++ 1 file changed, 930 insertions(+) create mode 100644 pkg/cluster/connection_pooler.go diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go new file mode 100644 index 000000000..5a7a2b54d --- /dev/null +++ b/pkg/cluster/connection_pooler.go @@ -0,0 +1,930 @@ +package cluster + +import ( + "context" + "fmt" + + "github.com/r3labs/diff" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" +) + +// K8S objects that are belongs to a connection pooler +type ConnectionPoolerObjects struct { + Deployment *appsv1.Deployment + Service *v1.Service + Name string + ClusterName string + Namespace string + // It could happen that a connection pooler was enabled, but the operator + // was not able to properly process a corresponding event or was restarted. 
+	// In this case we will miss missing/require situation and a lookup function
+	// will not be installed. To avoid synchronizing it all the time to prevent
+	// this, we can remember the result in memory at least until the next
+	// restart.
+	LookupFunction bool
+}
+
+func (c *Cluster) connectionPoolerName(role PostgresRole) string {
+	name := c.Name + "-pooler"
+	if role == Replica {
+		name = name + "-repl"
+	}
+	c.logger.Warningf("found connection pooler name is %s , clustername is %s", name, c.Name)
+	return name
+}
+
+// isConnectionPoolerEnabled
+func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
+	return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
+}
+
+func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
+	return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler
+}
+
+//TODO: use spec from cluster, or include connection pooler related flags to connection pooler package itself
+func (c *Cluster) needMasterConnectionPooler() bool {
+	return c.needMasterConnectionPoolerWorker(&c.Spec)
+}
+
+func (c *Cluster) needConnectionPooler() bool {
+	return c.needMasterConnectionPoolerWorker(&c.Spec) || c.needReplicaConnectionPoolerWorker(&c.Spec)
+}
+
+// RolesConnectionPooler gives the list of roles which need connection pooler
+func (c *Cluster) RolesConnectionPooler() []PostgresRole {
+	// length 0, capacity 2: a length-2 slice plus append would leave two
+	// empty ("") roles at the front of the result
+	roles := make([]PostgresRole, 0, 2)
+
+	if c.needMasterConnectionPoolerWorker(&c.Spec) {
+		roles = append(roles, Master)
+	}
+	// NOTE(review): this must test the replica worker flag; re-testing the
+	// master flag here meant Replica was appended for the master pooler and
+	// never for EnableReplicaConnectionPooler
+	if c.needReplicaConnectionPoolerWorker(&c.Spec) {
+		roles = append(roles, Replica)
+	}
+	return roles
+}
+
+func (c *Cluster) needReplicaConnectionPooler() bool {
+	return c.needReplicaConnectionPoolerWorker(&c.Spec)
+}
+
+// Return connection pooler labels selector, which should from one point of view
+// inherit most of the labels from the cluster itself, but at the
same time
+// have e.g. different `application` label, so that recreatePod operation will
+// not interfere with it (it lists all the pods via labels, and if there would
+// be no difference, it will recreate also pooler pods).
+func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector {
+	connectionPoolerLabels := labels.Set(map[string]string{})
+
+	extraLabels := labels.Set(map[string]string{
+		"connection-pooler-name": c.connectionPoolerName(role),
+		"application":            "db-connection-pooler",
+		"role":                   string(role),
+		"cluster-name":           c.Name,
+		"Namespace":              c.Namespace,
+	})
+
+	connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false))
+	connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels)
+
+	return &metav1.LabelSelector{
+		MatchLabels:      connectionPoolerLabels,
+		MatchExpressions: nil,
+	}
+}
+
+// Prepare the database for connection pooler to be used, i.e. install lookup
+// function (do it first, because it should be fast and if it didn't succeed,
+// it doesn't make sense to create more K8S objects. At this moment we assume
+// that necessary connection pooler user exists.
+//
+// After that create all the objects for connection pooler, namely a deployment
+// with a chosen pooler and a service to expose it.
+ +// have connectionpooler name in the cp object to have it immutable name +// add these cp related functions to a new cp file +// opConfig, cluster, and database name +func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRole) (*ConnectionPoolerObjects, error) { + var msg string + c.setProcessName("creating connection pooler") + + schema := c.Spec.ConnectionPooler.Schema + + if schema == "" { + schema = c.OpConfig.ConnectionPooler.Schema + } + + user := c.Spec.ConnectionPooler.User + if user == "" { + user = c.OpConfig.ConnectionPooler.User + } + + err := lookup(schema, user, role) + + if err != nil { + msg = "could not prepare database for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + if c.ConnectionPooler == nil { + c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{role: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }} + c.logger.Warningf("in creation, now the initialised object is %v %v", c.ConnectionPooler, c.ConnectionPooler[role]) + } + deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return nil, fmt.Errorf(msg, err) + } + + // client-go does retry 10 times (with NoBackoff by default) when the API + // believe a request can be retried and returns Retry-After header. This + // should be good enough to not think about it here. + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + + serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). 
+ Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment + c.ConnectionPooler[role].Service = service + + c.logger.Debugf("created new connection pooler %q, uid: %q", + util.NameFromMeta(deployment.ObjectMeta), deployment.UID) + + return c.ConnectionPooler[role], nil +} + +// +// Generate pool size related environment variables. +// +// MAX_DB_CONN would specify the global maximum for connections to a target +// database. +// +// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. +// +// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when +// most of the queries coming through a connection pooler are from the same +// user to the same db). In case if we want to spin up more connection pooler +// instances, take this into account and maintain the same number of +// connections. +// +// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload +// have to wait for spinning up a new connections. +// +// RESERVE_SIZE is how many additional connections to allow for a pooler. 
+func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { + effectiveMode := util.Coalesce( + spec.ConnectionPooler.Mode, + c.OpConfig.ConnectionPooler.Mode) + + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + effectiveMaxDBConn := util.CoalesceInt32( + spec.ConnectionPooler.MaxDBConnections, + c.OpConfig.ConnectionPooler.MaxDBConnections) + + if effectiveMaxDBConn == nil { + effectiveMaxDBConn = k8sutil.Int32ToPointer( + constants.ConnectionPoolerMaxDBConnections) + } + + maxDBConn := *effectiveMaxDBConn / *numberOfInstances + + defaultSize := maxDBConn / 2 + minSize := defaultSize / 2 + reserveSize := minSize + + return []v1.EnvVar{ + { + Name: "CONNECTION_POOLER_PORT", + Value: fmt.Sprint(pgPort), + }, + { + Name: "CONNECTION_POOLER_MODE", + Value: effectiveMode, + }, + { + Name: "CONNECTION_POOLER_DEFAULT_SIZE", + Value: fmt.Sprint(defaultSize), + }, + { + Name: "CONNECTION_POOLER_MIN_SIZE", + Value: fmt.Sprint(minSize), + }, + { + Name: "CONNECTION_POOLER_RESERVE_SIZE", + Value: fmt.Sprint(reserveSize), + }, + { + Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", + Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), + }, + { + Name: "CONNECTION_POOLER_MAX_DB_CONN", + Value: fmt.Sprint(maxDBConn), + }, + } +} + +func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( + *v1.PodTemplateSpec, error) { + + gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) + resources, err := generateResourceRequirements( + spec.ConnectionPooler.Resources, + c.makeDefaultConnectionPoolerResources()) + + effectiveDockerImage := util.Coalesce( + spec.ConnectionPooler.DockerImage, + c.OpConfig.ConnectionPooler.Image) + + effectiveSchema := util.Coalesce( + spec.ConnectionPooler.Schema, + 
c.OpConfig.ConnectionPooler.Schema) + + if err != nil { + return nil, fmt.Errorf("could not generate resource requirements: %v", err) + } + + secretSelector := func(key string) *v1.SecretKeySelector { + effectiveUser := util.Coalesce( + spec.ConnectionPooler.User, + c.OpConfig.ConnectionPooler.User) + + return &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: c.credentialSecretName(effectiveUser), + }, + Key: key, + } + } + + envVars := []v1.EnvVar{ + { + Name: "PGHOST", + Value: c.serviceAddress(role), + }, + { + Name: "PGPORT", + Value: c.servicePort(role), + }, + { + Name: "PGUSER", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("username"), + }, + }, + // the convention is to use the same schema name as + // connection pooler username + { + Name: "PGSCHEMA", + Value: effectiveSchema, + }, + { + Name: "PGPASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("password"), + }, + }, + } + envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...) 
+ + poolerContainer := v1.Container{ + Name: connectionPoolerContainer, + Image: effectiveDockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resources, + Ports: []v1.ContainerPort{ + { + ContainerPort: pgPort, + Protocol: v1.ProtocolTCP, + }, + }, + Env: envVars, + ReadinessProbe: &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: intstr.IntOrString{IntVal: pgPort}, + }, + }, + }, + } + + podTemplate := &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Namespace: c.Namespace, + Annotations: c.generatePodAnnotations(spec), + }, + Spec: v1.PodSpec{ + ServiceAccountName: c.OpConfig.PodServiceAccountName, + TerminationGracePeriodSeconds: &gracePeriod, + Containers: []v1.Container{poolerContainer}, + // TODO: add tolerations to scheduler pooler on the same node + // as database + //Tolerations: *tolerationsSpec, + }, + } + + return podTemplate, nil +} + +func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( + *appsv1.Deployment, error) { + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. 
+ if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + if *numberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "Adjusted number of connection pooler instances from %d to %d" + c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + + *numberOfInstances = constants.ConnectionPoolerMinInstances + } + + if err != nil { + return nil, err + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName(role), + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this deployment, but there is a hope that this object + // will be garbage collected if something went wrong and operator + // didn't deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: numberOfInstances, + Selector: c.connectionPoolerLabelsSelector(role), + Template: *podTemplate, + }, + } + + return deployment, nil +} + +func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. 
It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. + if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + serviceSpec := v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: c.connectionPoolerName(role), + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, + }, + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(role), + }, + } + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.connectionPoolerName(role), + Namespace: c.Namespace, + Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Annotations: map[string]string{}, + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this service, but there is a hope that this object will + // be garbage collected if something went wrong and operator didn't + // deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: serviceSpec, + } + + return service +} + +//delete connection pooler +func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { + //c.setProcessName("deleting connection pooler") + c.logger.Debugln("deleting connection pooler") + + // Lack of connection pooler objects is not a fatal error, just log it if + // it was present before in the manifest + if c == nil { + c.logger.Infof("No connection pooler to delete") + return nil + } + + // Clean up the deployment object. 
If deployment resource we've remembered + // is somehow empty, try to delete based on what would we generate + var deployment *appsv1.Deployment + deployment = c.ConnectionPooler[role].Deployment + + policy := metav1.DeletePropagationForeground + options := metav1.DeleteOptions{PropagationPolicy: &policy} + + if deployment != nil { + + // set delete propagation policy to foreground, so that replica set will be + // also deleted. + + err = c.KubeClient. + Deployments(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler deployment was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete deployment: %v", err) + } + + c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) + } + + // Repeat the same for the service object + var service *v1.Service + service = c.ConnectionPooler[role].Service + + if service != nil { + + err = c.KubeClient. + Services(c.Namespace). + Delete(context.TODO(), c.connectionPoolerName(role), options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("Connection pooler service was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete service: %v", err) + } + + c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) + } + // Repeat the same for the secret object + secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) + + secret, err := c.KubeClient. + Secrets(c.Namespace). 
+ Get(context.TODO(), secretName, metav1.GetOptions{}) + + if err != nil { + c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err) + } else { + if err = c.deleteSecret(secret.UID, *secret); err != nil { + return fmt.Errorf("could not delete pooler secret: %v", err) + } + } + + c = nil + return nil +} + +// Perform actual patching of a connection pooler deployment, assuming that all +// the check were already done before. +func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { + //c.setProcessName("updating connection pooler") + if c == nil || c.ConnectionPooler[role].Deployment == nil { + return nil, fmt.Errorf("there is no connection pooler in the cluster") + } + + patchData, err := specPatch(newDeployment.Spec) + if err != nil { + return nil, fmt.Errorf("could not form patch for the deployment: %v", err) + } + + // An update probably requires RetryOnConflict, but since only one operator + // worker at one time will try to update it chances of conflicts are + // minimal. + deployment, err := c.KubeClient. 
+ Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + context.TODO(), + c.ConnectionPooler[role].Deployment.Name, + types.MergePatchType, + patchData, + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch deployment: %v", err) + } + + c.ConnectionPooler[role].Deployment = deployment + + return deployment, nil +} + +//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment +func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { + c.logger.Debugf("updating connection pooler annotations") + patchData, err := metaAnnotationsPatch(annotations) + if err != nil { + return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) + } + result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + context.TODO(), + c.ConnectionPooler[role].Deployment.Name, + types.MergePatchType, + []byte(patchData), + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) + } + return result, nil + +} + +// Test if two connection pooler configuration needs to be synced. For simplicity +// compare not the actual K8S objects, but the configuration itself and request +// sync if there is any difference. 
+func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { + reasons = []string{} + sync = false + + changelog, err := diff.Diff(oldSpec, newSpec) + if err != nil { + c.logger.Infof("Cannot get diff, do not do anything, %+v", err) + return false, reasons + } + + if len(changelog) > 0 { + sync = true + } + + for _, change := range changelog { + msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", + change.Type, change.Path, change.From, change.To) + reasons = append(reasons, msg) + } + + return sync, reasons +} + +// Check if we need to synchronize connection pooler deployment due to new +// defaults, that are different from what we see in the DeploymentSpec +func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { + + reasons = []string{} + sync = false + + config := c.OpConfig.ConnectionPooler + podTemplate := deployment.Spec.Template + poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] + + if spec == nil { + spec = &acidv1.ConnectionPooler{} + } + + if spec.NumberOfInstances == nil && + *deployment.Spec.Replicas != *config.NumberOfInstances { + + sync = true + msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", + *deployment.Spec.Replicas, *config.NumberOfInstances) + reasons = append(reasons, msg) + } + + if spec.DockerImage == "" && + poolerContainer.Image != config.Image { + + sync = true + msg := fmt.Sprintf("DockerImage is different (having %s, required %s)", + poolerContainer.Image, config.Image) + reasons = append(reasons, msg) + } + + expectedResources, err := generateResourceRequirements(spec.Resources, + c.makeDefaultConnectionPoolerResources()) + + // An error to generate expected resources means something is not quite + // right, but for the purpose of robustness do not panic here, just report + // and ignore resources comparison (in the worst 
case there will be no + // updates for new resource values). + if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + sync = true + msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", + poolerContainer.Resources, expectedResources) + reasons = append(reasons, msg) + } + + if err != nil { + c.logger.Warningf("Cannot generate expected resources, %v", err) + } + + for _, env := range poolerContainer.Env { + if spec.User == "" && env.Name == "PGUSER" { + ref := env.ValueFrom.SecretKeyRef.LocalObjectReference + + if ref.Name != c.credentialSecretName(config.User) { + sync = true + msg := fmt.Sprintf("pooler user is different (having %s, required %s)", + ref.Name, config.User) + reasons = append(reasons, msg) + } + } + + if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { + sync = true + msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", + env.Value, config.Schema) + reasons = append(reasons, msg) + } + } + + return sync, reasons +} + +// Generate default resource section for connection pooler deployment, to be +// used if nothing custom is specified in the manifest +func (c Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources { + config := c.OpConfig + + defaultRequests := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, + } + defaultLimits := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, + } + + return acidv1.Resources{ + ResourceRequests: defaultRequests, + ResourceLimits: defaultLimits, + } +} + +func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) (SyncReason, error) { + + var reason SyncReason + var err error + var newNeedConnectionPooler, oldNeedConnectionPooler bool + + // Check 
and perform the sync requirements for each of the roles. + for _, role := range [2]PostgresRole{Master, Replica} { + if role == Master { + newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + } else { + newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + } + c.logger.Warningf("in sync, the current role is %s", role) + c.logger.Warningf("in sync, the current object is %v", c.ConnectionPooler) + if c.ConnectionPooler == nil { + c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + } + } + c.logger.Warningf("in sync, now the initialised object is %v %v", c.ConnectionPooler, c.ConnectionPooler[role]) + if newNeedConnectionPooler { + // Try to sync in any case. If we didn't needed connection pooler before, + // it means we want to create it. 
If it was already present, still sync + // since it could happen that there is no difference in specs, and all + // the resources are remembered, but the deployment was manually deleted + // in between + c.logger.Debug("syncing connection pooler for the role %s", role) + + // in this case also do not forget to install lookup function as for + // creating cluster + if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { + newConnectionPooler := newSpec.Spec.ConnectionPooler + + specSchema := "" + specUser := "" + + if newConnectionPooler != nil { + specSchema = newConnectionPooler.Schema + specUser = newConnectionPooler.User + } + + schema := util.Coalesce( + specSchema, + c.OpConfig.ConnectionPooler.Schema) + + user := util.Coalesce( + specUser, + c.OpConfig.ConnectionPooler.User) + + if err = lookup(schema, user, role); err != nil { + return NoSync, err + } + } + + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + return reason, err + } + } + + if oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources + if c.ConnectionPooler[role] != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + if c.ConnectionPooler[role] != nil && c.ConnectionPooler[role].Deployment == nil && c.ConnectionPooler[role].Service == nil { + c.ConnectionPooler[role] = nil + } + } + + if !oldNeedConnectionPooler && !newNeedConnectionPooler { + // delete and cleanup resources if not empty + if c.ConnectionPooler[role] != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + } + } + + return 
reason, nil +} + +//TODO use Kubeclient, AnnotationsToPropagate from cluster package +// Synchronize connection pooler resources. Effectively we're interested only in +// synchronizing the corresponding deployment, but in case of deployment or +// service is missing, create it. After checking, also remember an object for +// the future references. +func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( + SyncReason, error) { + + deployment, err := c.KubeClient. + Deployments(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return NoSync, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Deployment = deployment + } else if err != nil { + msg := "could not get connection pooler deployment to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + c.ConnectionPooler[role].Deployment = deployment + + // actual synchronization + oldConnectionPooler := oldSpec.Spec.ConnectionPooler + newConnectionPooler := newSpec.Spec.ConnectionPooler + + // sync implementation below assumes that both old and new specs are + // not nil, but it can happen. To avoid any confusion like updating a + // deployment because the specification changed from nil to an empty + // struct (that was initialized somewhere before) replace any nil with + // an empty spec. 
+ if oldConnectionPooler == nil { + oldConnectionPooler = &acidv1.ConnectionPooler{} + } + + if newConnectionPooler == nil { + newConnectionPooler = &acidv1.ConnectionPooler{} + } + + c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + + specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + reason := append(specReason, defaultsReason...) + + if specSync || defaultsSync { + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + if err != nil { + msg := "could not generate deployment for connection pooler: %v" + return reason, fmt.Errorf(msg, err) + } + + oldDeploymentSpec := c.ConnectionPooler[role].Deployment + + deployment, err := c.updateConnectionPoolerDeployment( + oldDeploymentSpec, + newDeploymentSpec, + role) + + if err != nil { + return reason, err + } + c.ConnectionPooler[role].Deployment = deployment + + return reason, nil + } + } + + newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) + if newAnnotations != nil { + c.updateConnectionPoolerAnnotations(newAnnotations, role) + } + + service, err := c.KubeClient. + Services(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Service %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). 
+ Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Service = service + + } else if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler[role].Service = service + } + + return NoSync, nil +} From 60460c37c29381e6e23842b66096e6ea69b05b50 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 20 Oct 2020 09:13:28 +0200 Subject: [PATCH 28/40] fix test cases --- pkg/cluster/cluster.go | 30 +++++----- pkg/cluster/connection_pooler.go | 92 ++++++++++++++----------------- pkg/cluster/resources_test.go | 18 +++--- pkg/cluster/sync_test.go | 95 +++++++++++++++++++++++--------- 4 files changed, 130 insertions(+), 105 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index ca0d1ffb0..0a0df52ca 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -330,27 +330,23 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - if c.needConnectionPooler() { + for _, r := range c.RolesConnectionPooler() { + c.logger.Infof("Enabling connection pooler for %s", r) - roles := c.RolesConnectionPooler() - for _, r := range roles { - c.logger.Warningf("found roles are %v", r) + if c.ConnectionPooler[r] != nil { + c.logger.Warningf("Connection pooler %s already exists in the cluster for the role %s", c.connectionPoolerName(r), r) + return nil } - for _, r := range c.RolesConnectionPooler() { - if c.ConnectionPooler[r] != nil { - c.logger.Warning("Connection pooler already exists in the cluster") - return nil - } - connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) - if err != nil { - c.logger.Warningf("could not create connection pooler: %v", err) - return nil - } - c.logger.Infof("connection 
pooler %q has been successfully created for the role %v", - util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r) + connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) + if err != nil { + c.logger.Warningf("could not create connection pooler: %v", err) + return nil } + c.logger.Infof("connection pooler %q has been successfully created for the role %v", + util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r) } + return nil } @@ -639,7 +635,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // initUsers. Check if it needs to be called. sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) && reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) - needConnectionPooler := c.needMasterConnectionPoolerWorker(&newSpec.Spec) + needConnectionPooler := needMasterConnectionPoolerWorker(&newSpec.Spec) if !sameUsers || needConnectionPooler { c.logger.Debugf("syncing secrets") if err := c.initUsers(); err != nil { diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 5a7a2b54d..fd8ff34a0 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -32,6 +32,8 @@ type ConnectionPoolerObjects struct { // this, we can remember the result in memory at least until the next // restart. 
LookupFunction bool + // Careful with referencing cluster.spec this object pointer changes during runtime and lifetime of cluster + // Cluster *cluster } func (c *Cluster) connectionPoolerName(role PostgresRole) string { @@ -39,45 +41,44 @@ func (c *Cluster) connectionPoolerName(role PostgresRole) string { if role == Replica { name = name + "-repl" } - c.logger.Warningf("found connection pooler name is %s , clustername is %s", name, c.Name) return name } // isConnectionPoolerEnabled -func (c *Cluster) needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) +func (c *Cluster) needConnectionPooler() bool { + return needMasterConnectionPoolerWorker(&c.Spec) || needReplicaConnectionPoolerWorker(&c.Spec) } -func (c *Cluster) needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler +func (c *Cluster) needMasterConnectionPooler() bool { + return needMasterConnectionPoolerWorker(&c.Spec) } -//TODO: use spec from cluster, or include connection pooler related flags to connection pooler package itself -func (c *Cluster) needMasterConnectionPooler() bool { - return c.needMasterConnectionPoolerWorker(&c.Spec) +func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) } -func (c *Cluster) needConnectionPooler() bool { - return c.needMasterConnectionPoolerWorker(&c.Spec) || c.needReplicaConnectionPoolerWorker(&c.Spec) +func (c *Cluster) needReplicaConnectionPooler() bool { + return needReplicaConnectionPoolerWorker(&c.Spec) +} + +func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return spec.EnableReplicaConnectionPooler != nil && 
*spec.EnableReplicaConnectionPooler } // RolesConnectionPooler gives the list of roles which need connection pooler func (c *Cluster) RolesConnectionPooler() []PostgresRole { roles := make([]PostgresRole, 2) - if c.needMasterConnectionPoolerWorker(&c.Spec) { + if needMasterConnectionPoolerWorker(&c.Spec) { roles = append(roles, Master) } - if c.needMasterConnectionPoolerWorker(&c.Spec) { + if needMasterConnectionPoolerWorker(&c.Spec) { roles = append(roles, Replica) } + c.logger.Warningf("roles found are %v", roles) return roles } -func (c *Cluster) needReplicaConnectionPooler() bool { - return c.needReplicaConnectionPoolerWorker(&c.Spec) -} - // Return connection pooler labels selector, which should from one point of view // inherit most of the labels from the cluster itself, but at the same time // have e.g. different `application` label, so that recreatePod operation will @@ -91,7 +92,7 @@ func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.Labe "application": "db-connection-pooler", "role": string(role), "cluster-name": c.Name, - "Namesapce": c.Namespace, + "Namespace": c.Namespace, }) connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) @@ -136,12 +137,18 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRo return nil, fmt.Errorf(msg, err) } if c.ConnectionPooler == nil { - c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{role: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }} - c.logger.Warningf("in creation, now the initialised object is %v %v", c.ConnectionPooler, c.ConnectionPooler[role]) + c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + } } deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) if err != nil { @@ -466,7 +473,7 @@ func (c *Cluster) 
deleteConnectionPooler(role PostgresRole) (err error) { // Lack of connection pooler objects is not a fatal error, just log it if // it was present before in the manifest - if c == nil { + if c == nil || role == "" { c.logger.Infof("No connection pooler to delete") return nil } @@ -494,7 +501,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { return fmt.Errorf("could not delete deployment: %v", err) } - c.logger.Infof("Connection pooler deployment %q has been deleted", c.connectionPoolerName(role)) + c.logger.Infof("Connection pooler deployment %q has been deleted for role %s", c.connectionPoolerName(role), role) } // Repeat the same for the service object @@ -513,7 +520,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { return fmt.Errorf("could not delete service: %v", err) } - c.logger.Infof("Connection pooler service %q has been deleted", c.connectionPoolerName(role)) + c.logger.Infof("Connection pooler service %q has been deleted for role %s", c.connectionPoolerName(role), role) } // Repeat the same for the secret object secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) @@ -530,7 +537,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { } } - c = nil + c.ConnectionPooler = nil return nil } @@ -717,14 +724,12 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look // Check and perform the sync requirements for each of the roles. 
for _, role := range [2]PostgresRole{Master, Replica} { if role == Master { - newNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needMasterConnectionPoolerWorker(&oldSpec.Spec) + newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec) } else { - newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec) + newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec) + oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec) } - c.logger.Warningf("in sync, the current role is %s", role) - c.logger.Warningf("in sync, the current object is %v", c.ConnectionPooler) if c.ConnectionPooler == nil { c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ Master: { @@ -739,14 +744,13 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look }, } } - c.logger.Warningf("in sync, now the initialised object is %v %v", c.ConnectionPooler, c.ConnectionPooler[role]) if newNeedConnectionPooler { // Try to sync in any case. If we didn't needed connection pooler before, // it means we want to create it. 
If it was already present, still sync // since it could happen that there is no difference in specs, and all // the resources are remembered, but the deployment was manually deleted // in between - c.logger.Debug("syncing connection pooler for the role %s", role) + c.logger.Debugf("syncing connection pooler for the role %s", role) // in this case also do not forget to install lookup function as for // creating cluster @@ -780,7 +784,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look } } - if oldNeedConnectionPooler && !newNeedConnectionPooler { + if !newNeedConnectionPooler { // delete and cleanup resources if c.ConnectionPooler[role] != nil && (c.ConnectionPooler[role].Deployment != nil || @@ -790,28 +794,12 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look c.logger.Warningf("could not remove connection pooler: %v", err) } } - if c.ConnectionPooler[role] != nil && c.ConnectionPooler[role].Deployment == nil && c.ConnectionPooler[role].Service == nil { - c.ConnectionPooler[role] = nil - } - } - - if !oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources if not empty - if c.ConnectionPooler[role] != nil && - (c.ConnectionPooler[role].Deployment != nil || - c.ConnectionPooler[role].Service != nil) { - - if err = c.deleteConnectionPooler(role); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } } } return reason, nil } -//TODO use Kubeclient, AnnotationsToPropagate from cluster package // Synchronize connection pooler resources. Effectively we're interested only in // synchronizing the corresponding deployment, but in case of deployment or // service is missing, create it. 
After checking, also remember an object for diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index 810119c89..aa44a8e60 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func mockInstallLookupFunction(schema string, user string) error { +func mockInstallLookupFunction(schema string, user string, role PostgresRole) error { return nil } @@ -48,19 +48,19 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, EnableReplicaConnectionPooler: boolToPointer(true), } - poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction) + for _, role := range cluster.RolesConnectionPooler() { + poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction, Master) - if err != nil { - t.Errorf("%s: Cannot create connection pooler, %s, %+v", - testName, err, poolerResources) - } + if err != nil { + t.Errorf("%s: Cannot create connection pooler, %s, %+v", + testName, err, poolerResources) + } - for _, role := range cluster.RolesConnectionPooler() { - if poolerResources.Deployment[role] == nil { + if poolerResources.Deployment == nil { t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) } - if poolerResources.Service[role] == nil { + if poolerResources.Service == nil { t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) } diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index 491329265..dd9e40822 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -19,10 +19,10 @@ func int32ToPointer(value int32) *int32 { } func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range cluster.RolesConnectionPooler() { - if cluster.ConnectionPooler.Deployment[role] != nil && - (cluster.ConnectionPooler.Deployment[role].Spec.Replicas == nil || - 
*cluster.ConnectionPooler.Deployment[role].Spec.Replicas != 2) { + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil && + (cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil || + *cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) { return fmt.Errorf("Wrong number of instances") } } @@ -35,11 +35,11 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { } for _, role := range []PostgresRole{Master, Replica} { - if cluster.ConnectionPooler.Deployment[role] == nil { + if cluster.ConnectionPooler[role].Deployment == nil { return fmt.Errorf("Deployment was not saved %s", role) } - if cluster.ConnectionPooler.Service[role] == nil { + if cluster.ConnectionPooler[role].Service == nil { return fmt.Errorf("Service was not saved %s", role) } } @@ -52,11 +52,11 @@ func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error return fmt.Errorf("Connection pooler resources are empty") } - if cluster.ConnectionPooler.Deployment[Master] == nil { + if cluster.ConnectionPooler[Master].Deployment == nil { return fmt.Errorf("Deployment was not saved") } - if cluster.ConnectionPooler.Service[Master] == nil { + if cluster.ConnectionPooler[Master].Service == nil { return fmt.Errorf("Service was not saved") } @@ -68,11 +68,11 @@ func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) erro return fmt.Errorf("Connection pooler resources are empty") } - if cluster.ConnectionPooler.Deployment[Replica] == nil { + if cluster.ConnectionPooler[Replica].Deployment == nil { return fmt.Errorf("Deployment was not saved") } - if cluster.ConnectionPooler.Service[Replica] == nil { + if cluster.ConnectionPooler[Replica].Service == nil { return fmt.Errorf("Service was not saved") } @@ -80,8 +80,11 @@ func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) erro } func objectsAreDeleted(cluster 
*Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler != nil { - return fmt.Errorf("Connection pooler was not deleted") + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && + (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { + return fmt.Errorf("Connection pooler was not deleted for role %v", cluster.ConnectionPooler[role]) + } } return nil @@ -89,8 +92,8 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler != nil && - (cluster.ConnectionPooler.Deployment[Master] != nil || cluster.ConnectionPooler.Service[Master] != nil) { + if cluster.ConnectionPooler[Master] != nil && + (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { return fmt.Errorf("Connection pooler master was not deleted") } return nil @@ -98,8 +101,8 @@ func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler != nil && - (cluster.ConnectionPooler.Deployment[Replica] != nil || cluster.ConnectionPooler.Service[Replica] != nil) { + if cluster.ConnectionPooler[Replica] != nil && + (cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) { return fmt.Errorf("Connection pooler replica was not deleted") } return nil @@ -152,21 +155,38 @@ func TestConnectionPoolerSynchronization(t *testing.T) { clusterDirtyMock := newCluster() clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() - clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: make(map[PostgresRole]*appsv1.Deployment), - Service: make(map[PostgresRole]*v1.Service), + clusterDirtyMock.ConnectionPooler = 
map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, } - clusterDirtyMock.ConnectionPooler.Deployment[Master] = &appsv1.Deployment{} - clusterDirtyMock.ConnectionPooler.Service[Master] = &v1.Service{} + + clusterDirtyMock.ConnectionPooler[Master].Deployment = &appsv1.Deployment{} + clusterDirtyMock.ConnectionPooler[Master].Service = &v1.Service{} clusterReplicaDirtyMock := newCluster() clusterReplicaDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() - clusterReplicaDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: make(map[PostgresRole]*appsv1.Deployment), - Service: make(map[PostgresRole]*v1.Service), + clusterReplicaDirtyMock.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, } - clusterDirtyMock.ConnectionPooler.Deployment[Replica] = &appsv1.Deployment{} - clusterDirtyMock.ConnectionPooler.Service[Replica] = &v1.Service{} + clusterDirtyMock.ConnectionPooler[Replica].Deployment = &appsv1.Deployment{} + clusterDirtyMock.ConnectionPooler[Replica].Service = &v1.Service{} clusterNewDefaultsMock := newCluster() clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() @@ -321,7 +341,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, }, }, - cluster: clusterMock, + cluster: clusterDirtyMock, defaultImage: "pooler:1.0", defaultInstances: 1, check: OnlyReplicaDeleted, @@ -360,6 +380,27 @@ func TestConnectionPoolerSynchronization(t *testing.T) { defaultInstances: 1, check: deploymentUpdated, }, + { + subTest: "update deployment", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(1), + 
}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(2), + }, + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: deploymentUpdated, + }, { subTest: "update image from changed defaults", oldSpec: &acidv1.Postgresql{ From 5828e2f19e8cfe8733f72e153c9bd9fc6b58ac63 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 20 Oct 2020 12:16:29 +0200 Subject: [PATCH 29/40] avoid passing spec unnecessarily --- pkg/cluster/connection_pooler.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index fd8ff34a0..17a800186 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -150,7 +150,7 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRo }, } } - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, role) + deploymentSpec, err := c.generateConnectionPoolerDeployment(role) if err != nil { msg = "could not generate deployment for connection pooler: %v" return nil, fmt.Errorf(msg, err) @@ -202,7 +202,8 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction, role PostgresRo // have to wait for spinning up a new connections. // // RESERVE_SIZE is how many additional connections to allow for a pooler. 
-func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { +func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar { + spec := &c.Spec effectiveMode := util.Coalesce( spec.ConnectionPooler.Mode, c.OpConfig.ConnectionPooler.Mode) @@ -261,9 +262,9 @@ func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.Env } } -func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, role PostgresRole) ( +func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( *v1.PodTemplateSpec, error) { - + spec := &c.Spec gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) resources, err := generateResourceRequirements( spec.ConnectionPooler.Resources, @@ -322,7 +323,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, }, }, } - envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...) + envVars = append(envVars, c.getConnectionPoolerEnvVars()...) poolerContainer := v1.Container{ Name: connectionPoolerContainer, @@ -364,8 +365,9 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec, return podTemplate, nil } -func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, role PostgresRole) ( +func (c *Cluster) generateConnectionPoolerDeployment(role PostgresRole) ( *appsv1.Deployment, error) { + spec := &c.Spec // there are two ways to enable connection pooler, either to specify a // connectionPooler section or enableConnectionPooler. 
In the second case @@ -377,7 +379,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec, spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec, role) + podTemplate, err := c.generateConnectionPoolerPodTemplate(role) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( @@ -815,7 +817,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql msg := "Deployment %s for connection pooler synchronization is not found, create it" c.logger.Warningf(msg, c.connectionPoolerName(role)) - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + deploymentSpec, err := c.generateConnectionPoolerDeployment(role) if err != nil { msg = "could not generate deployment for connection pooler: %v" return NoSync, fmt.Errorf(msg, err) @@ -861,7 +863,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if specSync || defaultsSync { c.logger.Infof("Update connection pooler deployment %s, reason: %+v", c.connectionPoolerName(role), reason) - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec, role) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(role) if err != nil { msg := "could not generate deployment for connection pooler: %v" return reason, fmt.Errorf(msg, err) From c8917986c0cf1919d9b35e6042eead4aa23d98e9 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 20 Oct 2020 15:23:10 +0200 Subject: [PATCH 30/40] Code utilization of syncConnectionPooler create and sync connectionPooler were doing essentially the same task with respect to creating new connectionPooler if not present. Hence, the code from create is already present in sync, which is now utilized and duplicate code is removed. Other code duplication is also removed in sync, for deciding upon when to delete the connectionPooler. 
Basically, anytime newNeedConnectionPooler is false, we have to delete it, if present. Other respective modifications in tests. --- pkg/cluster/cluster.go | 19 ++--- pkg/cluster/connection_pooler.go | 115 ++++++++++--------------------- pkg/cluster/k8sres_test.go | 16 +---- pkg/cluster/resources_test.go | 8 +-- 4 files changed, 47 insertions(+), 111 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 0a0df52ca..4ba0c9918 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -330,21 +330,10 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - for _, r := range c.RolesConnectionPooler() { - c.logger.Infof("Enabling connection pooler for %s", r) - - if c.ConnectionPooler[r] != nil { - c.logger.Warningf("Connection pooler %s already exists in the cluster for the role %s", c.connectionPoolerName(r), r) - return nil - } - - connectionPooler, err := c.createConnectionPooler(c.installLookupFunction, r) - if err != nil { - c.logger.Warningf("could not create connection pooler: %v", err) - return nil - } - c.logger.Infof("connection pooler %q has been successfully created for the role %v", - util.NameFromMeta(connectionPooler.Deployment.ObjectMeta), r) + c.createConnectionPooler(c.installLookupFunction) + if err != nil { + c.logger.Warningf("could not create connection pooler: %v", err) + return nil } return nil diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 17a800186..7ac0df5ef 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -115,73 +115,15 @@ func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.Labe // have connectionpooler name in the cp object to have it immutable name // add these cp related functions to a new cp file // opConfig, cluster, and database name -func (c *Cluster) createConnectionPooler(lookup InstallFunction, role 
PostgresRole) (*ConnectionPoolerObjects, error) { - var msg string +func (c *Cluster) createConnectionPooler(lookup InstallFunction) (SyncReason, error) { + var reason SyncReason c.setProcessName("creating connection pooler") - schema := c.Spec.ConnectionPooler.Schema - - if schema == "" { - schema = c.OpConfig.ConnectionPooler.Schema - } - - user := c.Spec.ConnectionPooler.User - if user == "" { - user = c.OpConfig.ConnectionPooler.User - } - - err := lookup(schema, user, role) - - if err != nil { - msg = "could not prepare database for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - if c.ConnectionPooler == nil { - c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }, - Replica: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }, - } + //this is essentially sync with nil as oldSpec + if reason, err := c.syncConnectionPooler(nil, &c.Postgresql, lookup); err != nil { + return reason, err } - deploymentSpec, err := c.generateConnectionPoolerDeployment(role) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - serviceSpec := c.generateConnectionPoolerService(&c.Spec, role) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - c.ConnectionPooler[role].Deployment = deployment - c.ConnectionPooler[role].Service = service - - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - - return c.ConnectionPooler[role], nil + return reason, nil } // @@ -545,7 +487,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. -func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { +func (c *Cluster) updateConnectionPoolerDeployment(newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { //c.setProcessName("updating connection pooler") if c == nil || c.ConnectionPooler[role].Deployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") @@ -600,13 +542,13 @@ func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]strin // Test if two connection pooler configuration needs to be synced. For simplicity // compare not the actual K8S objects, but the configuration itself and request // sync if there is any difference. 
-func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { +func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { reasons = []string{} sync = false changelog, err := diff.Diff(oldSpec, newSpec) if err != nil { - c.logger.Infof("Cannot get diff, do not do anything, %+v", err) + //c.logger.Infof("Cannot get diff, do not do anything, %+v", err) return false, reasons } @@ -722,15 +664,25 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look var reason SyncReason var err error var newNeedConnectionPooler, oldNeedConnectionPooler bool + oldNeedConnectionPooler = false // Check and perform the sync requirements for each of the roles. for _, role := range [2]PostgresRole{Master, Replica} { + + if c.ConnectionPooler[role] != nil { + c.logger.Warningf("Connection pooler %s already exists in the cluster for the role %s", c.connectionPoolerName(role), role) + return NoSync, nil + } if role == Master { newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec) + if oldSpec != nil { + oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec) + } } else { newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec) + if oldSpec != nil { + oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec) + } } if c.ConnectionPooler == nil { c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ @@ -784,9 +736,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look c.logger.Errorf("could not sync connection pooler: %v", err) return reason, err } - } - - if !newNeedConnectionPooler { + } else { // delete and cleanup resources if c.ConnectionPooler[role] != nil && 
(c.ConnectionPooler[role].Deployment != nil || @@ -836,11 +786,15 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return NoSync, fmt.Errorf(msg, err) } else { c.ConnectionPooler[role].Deployment = deployment - // actual synchronization - oldConnectionPooler := oldSpec.Spec.ConnectionPooler - newConnectionPooler := newSpec.Spec.ConnectionPooler + var oldConnectionPooler *acidv1.ConnectionPooler + + if oldSpec != nil { + oldConnectionPooler = oldSpec.Spec.ConnectionPooler + } + + newConnectionPooler := newSpec.Spec.ConnectionPooler // sync implementation below assumes that both old and new specs are // not nil, but it can happen. To avoid any confusion like updating a // deployment because the specification changed from nil to an empty @@ -856,7 +810,13 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + var specSync bool + var specReason []string + + if oldSpec != nil { + specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + } + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) 
@@ -869,10 +829,9 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return reason, fmt.Errorf(msg, err) } - oldDeploymentSpec := c.ConnectionPooler[role].Deployment + //oldDeploymentSpec := c.ConnectionPooler[role].Deployment deployment, err := c.updateConnectionPoolerDeployment( - oldDeploymentSpec, newDeploymentSpec, role) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index da3a56d24..d918e5c10 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1022,18 +1022,6 @@ func TestConnectionPoolerPodSpec(t *testing.T) { }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - var clusterNoDefaultRes = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{}, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } tests := []struct { @@ -1092,7 +1080,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) { } for _, role := range [2]PostgresRole{Master, Replica} { for _, tt := range tests { - podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec, role) + podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role) if err != tt.expected && err.Error() != tt.expected.Error() { t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", @@ -1199,7 +1187,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { }, } for _, tt := range tests { - deployment, err := tt.cluster.generateConnectionPoolerDeployment(tt.spec, Master) + deployment, err := tt.cluster.generateConnectionPoolerDeployment(Master) if err != tt.expected && err.Error() != tt.expected.Error() { t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", 
diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index aa44a8e60..a688d400b 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -49,18 +49,18 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { EnableReplicaConnectionPooler: boolToPointer(true), } for _, role := range cluster.RolesConnectionPooler() { - poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction, Master) + reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) if err != nil { t.Errorf("%s: Cannot create connection pooler, %s, %+v", - testName, err, poolerResources) + testName, err, reason) } - if poolerResources.Deployment == nil { + if cluster.ConnectionPooler[role].Deployment == nil { t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) } - if poolerResources.Service == nil { + if cluster.ConnectionPooler[role].Service == nil { t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) } From 711299591777fe8a603f7575afd77bff55eb3575 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 21 Oct 2020 07:36:34 +0200 Subject: [PATCH 31/40] Fix sync/create issues --- pkg/cluster/connection_pooler.go | 12 +++-------- pkg/cluster/k8sres_test.go | 11 ++++++++++ pkg/cluster/resources_test.go | 37 +++++++++++++++++--------------- pkg/cluster/sync_test.go | 2 +- 4 files changed, 35 insertions(+), 27 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 7ac0df5ef..427192b19 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -75,7 +75,6 @@ func (c *Cluster) RolesConnectionPooler() []PostgresRole { if needMasterConnectionPoolerWorker(&c.Spec) { roles = append(roles, Replica) } - c.logger.Warningf("roles found are %v", roles) return roles } @@ -451,7 +450,9 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Repeat the same for the service object var 
service *v1.Service service = c.ConnectionPooler[role].Service - + if service == nil { + c.logger.Infof("nil service to be deleted") + } if service != nil { err = c.KubeClient. @@ -579,7 +580,6 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler if spec == nil { spec = &acidv1.ConnectionPooler{} } - if spec.NumberOfInstances == nil && *deployment.Spec.Replicas != *config.NumberOfInstances { @@ -669,10 +669,6 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look // Check and perform the sync requirements for each of the roles. for _, role := range [2]PostgresRole{Master, Replica} { - if c.ConnectionPooler[role] != nil { - c.logger.Warningf("Connection pooler %s already exists in the cluster for the role %s", c.connectionPoolerName(role), role) - return NoSync, nil - } if role == Master { newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec) if oldSpec != nil { @@ -839,8 +835,6 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return reason, err } c.ConnectionPooler[role].Deployment = deployment - - return reason, nil } } diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index d918e5c10..d46169e88 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1021,6 +1021,17 @@ func TestConnectionPoolerPodSpec(t *testing.T) { }, }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + var clusterNoDefaultRes = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{}, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go index 
a688d400b..8dfced429 100644 --- a/pkg/cluster/resources_test.go +++ b/pkg/cluster/resources_test.go @@ -34,6 +34,7 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { ConnectionPoolerDefaultCPULimit: "100m", ConnectionPoolerDefaultMemoryRequest: "100Mi", ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), }, }, }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) @@ -48,25 +49,27 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, EnableReplicaConnectionPooler: boolToPointer(true), } - for _, role := range cluster.RolesConnectionPooler() { - reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) - - if err != nil { - t.Errorf("%s: Cannot create connection pooler, %s, %+v", - testName, err, reason) - } - - if cluster.ConnectionPooler[role].Deployment == nil { - t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) - } - if cluster.ConnectionPooler[role].Service == nil { - t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) - } + reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) - err = cluster.deleteConnectionPooler(role) - if err != nil { - t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) + if err != nil { + t.Errorf("%s: Cannot create connection pooler, %s, %+v", + testName, err, reason) + } + for _, role := range cluster.RolesConnectionPooler() { + if cluster.ConnectionPooler[role] != nil { + if cluster.ConnectionPooler[role].Deployment == nil { + t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) + } + + if cluster.ConnectionPooler[role].Service == nil { + t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) + } + + err = cluster.deleteConnectionPooler(role) + if err != nil { + t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) + } } } } diff --git 
a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index dd9e40822..48b6f434f 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -83,7 +83,7 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { for _, role := range [2]PostgresRole{Master, Replica} { if cluster.ConnectionPooler[role] != nil && (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { - return fmt.Errorf("Connection pooler was not deleted for role %v", cluster.ConnectionPooler[role]) + return fmt.Errorf("Connection pooler was not deleted for role %v", role) } } From cbdc3464a155abccc9959e6042709fb03c2c2c7c Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 21 Oct 2020 07:56:56 +0200 Subject: [PATCH 32/40] fix podSpec test --- pkg/cluster/k8sres_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index d46169e88..9c2eb7d87 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1021,6 +1021,11 @@ func TestConnectionPoolerPodSpec(t *testing.T) { }, }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } var clusterNoDefaultRes = New( Config{ OpConfig: config.Config{ @@ -1033,6 +1038,11 @@ func TestConnectionPoolerPodSpec(t *testing.T) { }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + clusterNoDefaultRes.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } tests := []struct { From 72e2af8c739c78dfaf82068ff4343a84c63b983b Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 21 Oct 2020 16:39:04 +0200 Subject: [PATCH 33/40] isolate 
tests and other updates --- pkg/cluster/cluster.go | 4 - pkg/cluster/connection_pooler.go | 24 +- pkg/cluster/k8sres_test.go | 200 -------------- pkg/cluster/resources_test.go | 172 ------------ pkg/cluster/sync_test.go | 454 ------------------------------- 5 files changed, 10 insertions(+), 844 deletions(-) delete mode 100644 pkg/cluster/resources_test.go delete mode 100644 pkg/cluster/sync_test.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 4ba0c9918..e671355d1 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -331,10 +331,6 @@ func (c *Cluster) Create() error { // Do not consider connection pooler as a strict requirement, and if // something fails, report warning c.createConnectionPooler(c.installLookupFunction) - if err != nil { - c.logger.Warningf("could not create connection pooler: %v", err) - return nil - } return nil } diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 427192b19..4170f324b 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -488,9 +488,8 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. 
-func (c *Cluster) updateConnectionPoolerDeployment(newDeployment *appsv1.Deployment, role PostgresRole) (*appsv1.Deployment, error) { - //c.setProcessName("updating connection pooler") - if c == nil || c.ConnectionPooler[role].Deployment == nil { +func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { + if newDeployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } @@ -502,10 +501,10 @@ func (c *Cluster) updateConnectionPoolerDeployment(newDeployment *appsv1.Deploym // An update probably requires RetryOnConflict, but since only one operator // worker at one time will try to update it chances of conflicts are // minimal. - deployment, err := c.KubeClient. - Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + deployment, err := KubeClient. + Deployments(newDeployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler[role].Deployment.Name, + newDeployment.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, @@ -514,8 +513,6 @@ func (c *Cluster) updateConnectionPoolerDeployment(newDeployment *appsv1.Deploym return nil, fmt.Errorf("could not patch deployment: %v", err) } - c.ConnectionPooler[role].Deployment = deployment - return deployment, nil } @@ -659,7 +656,7 @@ func (c Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources { } } -func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) (SyncReason, error) { +func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, installLookupFunction InstallFunction) (SyncReason, error) { var reason SyncReason var err error @@ -700,7 +697,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look // since it could happen that there is no difference in specs, and all // the resources are remembered, but the deployment was manually deleted // in between - c.logger.Debugf("syncing 
connection pooler for the role %s", role) + c.logger.Debugf("syncing connection pooler for the Spilo role %s", role) // in this case also do not forget to install lookup function as for // creating cluster @@ -723,7 +720,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look specUser, c.OpConfig.ConnectionPooler.User) - if err = lookup(schema, user, role); err != nil { + if err = installLookupFunction(schema, user, role); err != nil { return NoSync, err } } @@ -827,9 +824,8 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql //oldDeploymentSpec := c.ConnectionPooler[role].Deployment - deployment, err := c.updateConnectionPoolerDeployment( - newDeploymentSpec, - role) + deployment, err := updateConnectionPoolerDeployment(c.KubeClient, + newDeploymentSpec) if err != nil { return reason, err diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 9c2eb7d87..be5f73de4 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -2,7 +2,6 @@ package cluster import ( "context" - "errors" "fmt" "reflect" "sort" @@ -1002,122 +1001,6 @@ func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error return nil } -func TestConnectionPoolerPodSpec(t *testing.T) { - testName := "Test connection pooler pod template generation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - MaxDBConnections: int32ToPointer(60), - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - 
EnableReplicaConnectionPooler: boolToPointer(true), - } - var clusterNoDefaultRes = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{}, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - clusterNoDefaultRes.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - } - - noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } - - tests := []struct { - subTest string - spec *acidv1.PostgresSpec - expected error - cluster *Cluster - check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error - }{ - { - subTest: "default configuration", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: noCheck, - }, - { - subTest: "no default resources", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`), - cluster: clusterNoDefaultRes, - check: noCheck, - }, - { - subTest: "default resources are set", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: testResources, - }, - { - subTest: "labels for service", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - }, - expected: nil, - cluster: cluster, - check: testLabels, - }, - { - subTest: "required envs", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - 
expected: nil, - cluster: cluster, - check: testEnvs, - }, - } - for _, role := range [2]PostgresRole{Master, Replica} { - for _, tt := range tests { - podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role) - - if err != tt.expected && err.Error() != tt.expected.Error() { - t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", - testName, tt.subTest, err, tt.expected) - } - - err = tt.check(cluster, podSpec, role) - if err != nil { - t.Errorf("%s [%s]: Pod spec is incorrect, %+v", - testName, tt.subTest, err) - } - } - } - -} - func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployment) error { owner := deployment.ObjectMeta.OwnerReferences[0] @@ -1141,89 +1024,6 @@ func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { return nil } -func TestConnectionPoolerDeploymentSpec(t *testing.T) { - testName := "Test connection pooler deployment spec generation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error { - return nil - } - - tests := []struct { - subTest string - spec *acidv1.PostgresSpec - expected error - cluster *Cluster - check func(cluster *Cluster, deployment *appsv1.Deployment) error - }{ - { - subTest: "default configuration", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: 
boolToPointer(true), - }, - expected: nil, - cluster: cluster, - check: noCheck, - }, - { - subTest: "owner reference", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - }, - expected: nil, - cluster: cluster, - check: testDeploymentOwnwerReference, - }, - { - subTest: "selector", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - }, - expected: nil, - cluster: cluster, - check: testSelector, - }, - } - for _, tt := range tests { - deployment, err := tt.cluster.generateConnectionPoolerDeployment(Master) - - if err != tt.expected && err.Error() != tt.expected.Error() { - t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", - testName, tt.subTest, err, tt.expected) - } - - err = tt.check(cluster, deployment) - if err != nil { - t.Errorf("%s [%s]: Deployment spec is incorrect, %+v", - testName, tt.subTest, err) - } - } - -} - func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { owner := service.ObjectMeta.OwnerReferences[0] diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go deleted file mode 100644 index 8dfced429..000000000 --- a/pkg/cluster/resources_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package cluster - -import ( - "testing" - - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/util/config" - "github.com/zalando/postgres-operator/pkg/util/k8sutil" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func mockInstallLookupFunction(schema string, user string, role PostgresRole) error { - return nil -} - -func boolToPointer(value bool) *bool { - return &value -} - -func TestConnectionPoolerCreationAndDeletion(t *testing.T) { - testName := "Test connection pooler creation" - var cluster = New( - Config{ - 
OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - NumberOfInstances: int32ToPointer(1), - }, - }, - }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) - - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - } - - reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) - - if err != nil { - t.Errorf("%s: Cannot create connection pooler, %s, %+v", - testName, err, reason) - } - for _, role := range cluster.RolesConnectionPooler() { - if cluster.ConnectionPooler[role] != nil { - if cluster.ConnectionPooler[role].Deployment == nil { - t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) - } - - if cluster.ConnectionPooler[role].Service == nil { - t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) - } - - err = cluster.deleteConnectionPooler(role) - if err != nil { - t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) - } - } - } -} - -func TestNeedConnectionPooler(t *testing.T) { - testName := "Test how connection pooler can be enabled" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - 
ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) - - cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if !cluster.needMasterConnectionPooler() { - t.Errorf("%s: Connection pooler is not enabled with full definition", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - } - - if !cluster.needMasterConnectionPooler() { - t.Errorf("%s: Connection pooler is not enabled with flag", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(false), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if cluster.needMasterConnectionPooler() { - t.Errorf("%s: Connection pooler is still enabled with flag being false", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if !cluster.needMasterConnectionPooler() { - t.Errorf("%s: Connection pooler is not enabled with flag and full", - testName) - } - - // Test for replica connection pooler - cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if cluster.needReplicaConnectionPooler() { - t.Errorf("%s: Replica Connection pooler is not enabled with full definition", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableReplicaConnectionPooler: boolToPointer(true), - } - - if !cluster.needReplicaConnectionPooler() { - t.Errorf("%s: Replica Connection pooler is not enabled with flag", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableReplicaConnectionPooler: boolToPointer(false), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if cluster.needReplicaConnectionPooler() { - t.Errorf("%s: Replica Connection pooler is still enabled with flag being false", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - 
EnableReplicaConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if !cluster.needReplicaConnectionPooler() { - t.Errorf("%s: Replica Connection pooler is not enabled with flag and full", - testName) - } -} diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go deleted file mode 100644 index 48b6f434f..000000000 --- a/pkg/cluster/sync_test.go +++ /dev/null @@ -1,454 +0,0 @@ -package cluster - -import ( - "fmt" - "strings" - "testing" - - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/util/config" - "github.com/zalando/postgres-operator/pkg/util/k8sutil" - - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func int32ToPointer(value int32) *int32 { - return &value -} - -func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range [2]PostgresRole{Master, Replica} { - if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil && - (cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil || - *cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) { - return fmt.Errorf("Wrong number of instances") - } - } - return nil -} - -func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler == nil { - return fmt.Errorf("Connection pooler resources are empty") - } - - for _, role := range []PostgresRole{Master, Replica} { - if cluster.ConnectionPooler[role].Deployment == nil { - return fmt.Errorf("Deployment was not saved %s", role) - } - - if cluster.ConnectionPooler[role].Service == nil { - return fmt.Errorf("Service was not saved %s", role) - } - } - - return nil -} - -func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler == nil { - return fmt.Errorf("Connection pooler resources are empty") - } - - if 
cluster.ConnectionPooler[Master].Deployment == nil { - return fmt.Errorf("Deployment was not saved") - } - - if cluster.ConnectionPooler[Master].Service == nil { - return fmt.Errorf("Service was not saved") - } - - return nil -} - -func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler == nil { - return fmt.Errorf("Connection pooler resources are empty") - } - - if cluster.ConnectionPooler[Replica].Deployment == nil { - return fmt.Errorf("Deployment was not saved") - } - - if cluster.ConnectionPooler[Replica].Service == nil { - return fmt.Errorf("Service was not saved") - } - - return nil -} - -func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range [2]PostgresRole{Master, Replica} { - if cluster.ConnectionPooler[role] != nil && - (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { - return fmt.Errorf("Connection pooler was not deleted for role %v", role) - } - } - - return nil -} - -func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { - - if cluster.ConnectionPooler[Master] != nil && - (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { - return fmt.Errorf("Connection pooler master was not deleted") - } - return nil -} - -func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { - - if cluster.ConnectionPooler[Replica] != nil && - (cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) { - return fmt.Errorf("Connection pooler replica was not deleted") - } - return nil -} - -func noEmptySync(cluster *Cluster, err error, reason SyncReason) error { - for _, msg := range reason { - if strings.HasPrefix(msg, "update [] from '' to '") { - return fmt.Errorf("There is an empty reason, %s", msg) - } - } - - return nil -} - -func TestConnectionPoolerSynchronization(t 
*testing.T) { - testName := "Test connection pooler synchronization" - newCluster := func() *Cluster { - return New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - NumberOfInstances: int32ToPointer(1), - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - } - cluster := newCluster() - - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - clusterMissingObjects := newCluster() - clusterMissingObjects.KubeClient = k8sutil.ClientMissingObjects() - - clusterMock := newCluster() - clusterMock.KubeClient = k8sutil.NewMockKubernetesClient() - - clusterDirtyMock := newCluster() - clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() - clusterDirtyMock.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }, - Replica: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }, - } - - clusterDirtyMock.ConnectionPooler[Master].Deployment = &appsv1.Deployment{} - clusterDirtyMock.ConnectionPooler[Master].Service = &v1.Service{} - clusterReplicaDirtyMock := newCluster() - clusterReplicaDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() - clusterReplicaDirtyMock.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }, - Replica: { - Deployment: nil, - Service: nil, - LookupFunction: false, - }, - } - - clusterDirtyMock.ConnectionPooler[Replica].Deployment = &appsv1.Deployment{} - clusterDirtyMock.ConnectionPooler[Replica].Service = &v1.Service{} - - 
clusterNewDefaultsMock := newCluster() - clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() - - tests := []struct { - subTest string - oldSpec *acidv1.Postgresql - newSpec *acidv1.Postgresql - cluster *Cluster - defaultImage string - defaultInstances int32 - check func(cluster *Cluster, err error, reason SyncReason) error - }{ - { - subTest: "create if doesn't exist", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: MasterobjectsAreSaved, - }, - { - subTest: "create if doesn't exist with a flag", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - }, - }, - cluster: clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: MasterobjectsAreSaved, - }, - { - subTest: "create replica if doesn't exist with a flag", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - }, - }, - cluster: clusterDirtyMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: ReplicaobjectsAreSaved, - }, - { - subTest: "create no replica with flag", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableReplicaConnectionPooler: boolToPointer(false), - }, - }, - cluster: clusterDirtyMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreDeleted, - }, - { - subTest: "create from scratch", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - 
newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: MasterobjectsAreSaved, - }, - { - subTest: "create both master and replica", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - EnableConnectionPooler: boolToPointer(true), - }, - }, - cluster: clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreSaved, - }, - { - subTest: "delete if not needed", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - cluster: clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreDeleted, - }, - { - subTest: "delete only master if not needed", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableConnectionPooler: boolToPointer(true), - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableReplicaConnectionPooler: boolToPointer(true), - }, - }, - cluster: clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: OnlyMasterDeleted, - }, - { - subTest: "delete only replica if not needed", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: clusterDirtyMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: OnlyReplicaDeleted, - }, - { - subTest: "cleanup if still there", - oldSpec: 
&acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - cluster: clusterDirtyMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreDeleted, - }, - { - subTest: "update deployment", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{ - NumberOfInstances: int32ToPointer(1), - }, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{ - NumberOfInstances: int32ToPointer(2), - }, - }, - }, - cluster: clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: deploymentUpdated, - }, - { - subTest: "update deployment", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{ - NumberOfInstances: int32ToPointer(1), - }, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{ - NumberOfInstances: int32ToPointer(2), - }, - }, - }, - cluster: clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: deploymentUpdated, - }, - { - subTest: "update image from changed defaults", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: clusterNewDefaultsMock, - defaultImage: "pooler:2.0", - defaultInstances: 2, - check: deploymentUpdated, - }, - { - subTest: "there is no sync from nil to an empty spec", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: nil, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: clusterMock, - defaultImage: 
"pooler:1.0", - defaultInstances: 1, - check: noEmptySync, - }, - } - for _, tt := range tests { - tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage - tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances = - int32ToPointer(tt.defaultInstances) - - reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec, - tt.newSpec, mockInstallLookupFunction) - - if err := tt.check(tt.cluster, err, reason); err != nil { - t.Errorf("%s [%s]: Could not synchronize, %+v", - testName, tt.subTest, err) - } - } -} From 3d3bc147d3b92ea3004894e30da76fd31bf563c2 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Wed, 21 Oct 2020 22:40:26 +0200 Subject: [PATCH 34/40] Refactor more methods --- pkg/cluster/connection_pooler.go | 81 ++++++++++++------- pkg/cluster/k8sres_test.go | 134 ------------------------------- 2 files changed, 50 insertions(+), 165 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 4170f324b..2ca636419 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -25,6 +25,7 @@ type ConnectionPoolerObjects struct { Name string ClusterName string Namespace string + Role PostgresRole // It could happen that a connection pooler was enabled, but the operator // was not able to properly process a corresponding event or was restarted. // In this case we will miss missing/require situation and a lookup function @@ -83,11 +84,11 @@ func (c *Cluster) RolesConnectionPooler() []PostgresRole { // have e.g. different `application` label, so that recreatePod operation will // not interfere with it (it lists all the pods via labels, and if there would // be no difference, it will recreate also pooler pods). 
-func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector { +func (c *Cluster) connectionPoolerLabelsSelector(name string, role PostgresRole) *metav1.LabelSelector { connectionPoolerLabels := labels.Set(map[string]string{}) extraLabels := labels.Set(map[string]string{ - "connection-pooler-name": c.connectionPoolerName(role), + "connection-pooler-name": name, "application": "db-connection-pooler", "role": string(role), "cluster-name": c.Name, @@ -289,7 +290,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( podTemplate := &v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Labels: c.connectionPoolerLabelsSelector(c.connectionPoolerName(role), role).MatchLabels, Namespace: c.Namespace, Annotations: c.generatePodAnnotations(spec), }, @@ -306,7 +307,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( return podTemplate, nil } -func (c *Cluster) generateConnectionPoolerDeployment(role PostgresRole) ( +func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *ConnectionPoolerObjects) ( *appsv1.Deployment, error) { spec := &c.Spec @@ -320,7 +321,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(role PostgresRole) ( spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - podTemplate, err := c.generateConnectionPoolerPodTemplate(role) + podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role) numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( @@ -341,9 +342,9 @@ func (c *Cluster) generateConnectionPoolerDeployment(role PostgresRole) ( deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), + Name: connectionPooler.Name, Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Labels: 
c.connectionPoolerLabelsSelector(connectionPooler.Name, connectionPooler.Role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -355,7 +356,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(role PostgresRole) ( }, Spec: appsv1.DeploymentSpec{ Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(role), + Selector: c.connectionPoolerLabelsSelector(connectionPooler.Name, connectionPooler.Role), Template: *podTemplate, }, } @@ -363,8 +364,9 @@ func (c *Cluster) generateConnectionPoolerDeployment(role PostgresRole) ( return deployment, nil } -func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, role PostgresRole) *v1.Service { +func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service { + spec := &c.Spec // there are two ways to enable connection pooler, either to specify a // connectionPooler section or enableConnectionPooler. 
In the second case // spec.connectionPooler will be nil, so to make it easier to calculate @@ -378,22 +380,22 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{ { - Name: c.connectionPoolerName(role), + Name: connectionPooler.Name, Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)}, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(connectionPooler.Role)}, }, }, Type: v1.ServiceTypeClusterIP, Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(role), + "connection-pooler": connectionPooler.Name, }, } service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(role), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector(role).MatchLabels, + Name: connectionPooler.Name, + Namespace: connectionPooler.Namespace, + Labels: c.connectionPoolerLabelsSelector(connectionPooler.Name, connectionPooler.Role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -416,7 +418,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Lack of connection pooler objects is not a fatal error, just log it if // it was present before in the manifest - if c == nil || role == "" { + if c.ConnectionPooler[role] == nil || role == "" { c.logger.Infof("No connection pooler to delete") return nil } @@ -436,7 +438,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { err = c.KubeClient. Deployments(c.Namespace). 
- Delete(context.TODO(), c.connectionPoolerName(role), options) + Delete(context.TODO(), deployment.Name, options) if k8sutil.ResourceNotFound(err) { c.logger.Debugf("Connection pooler deployment was already deleted") @@ -444,7 +446,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { return fmt.Errorf("could not delete deployment: %v", err) } - c.logger.Infof("Connection pooler deployment %q has been deleted for role %s", c.connectionPoolerName(role), role) + c.logger.Infof("Connection pooler deployment %q has been deleted for role %s", deployment.Name, role) } // Repeat the same for the service object @@ -457,7 +459,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { err = c.KubeClient. Services(c.Namespace). - Delete(context.TODO(), c.connectionPoolerName(role), options) + Delete(context.TODO(), service.Name, options) if k8sutil.ResourceNotFound(err) { c.logger.Debugf("Connection pooler service was already deleted") @@ -465,7 +467,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { return fmt.Errorf("could not delete service: %v", err) } - c.logger.Infof("Connection pooler service %q has been deleted for role %s", c.connectionPoolerName(role), role) + c.logger.Infof("Connection pooler service %q has been deleted for role %s", service.Name, role) } // Repeat the same for the secret object secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) @@ -482,7 +484,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { } } - c.ConnectionPooler = nil + c.ConnectionPooler[role] = nil return nil } @@ -517,15 +519,14 @@ func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDe } //updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string, role PostgresRole) (*appsv1.Deployment, error) { - c.logger.Debugf("updating 
connection pooler annotations") +func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) { patchData, err := metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) } - result, err := c.KubeClient.Deployments(c.ConnectionPooler[role].Deployment.Namespace).Patch( + result, err := KubeClient.Deployments(deployment.Namespace).Patch( context.TODO(), - c.ConnectionPooler[role].Deployment.Name, + deployment.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}, @@ -677,27 +678,42 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, inst oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec) } } + if c.ConnectionPooler == nil { c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ Master: { Deployment: nil, Service: nil, + Name: c.connectionPoolerName(Master), + ClusterName: c.ClusterName, + Namespace: c.Namespace, LookupFunction: false, + Role: Master, }, Replica: { Deployment: nil, Service: nil, + Name: c.connectionPoolerName(Replica), + ClusterName: c.ClusterName, + Namespace: c.Namespace, LookupFunction: false, + Role: Replica, }, } } + if c.ConnectionPooler[role] == nil { + c.ConnectionPooler[role] = &ConnectionPoolerObjects{ + Deployment: nil, + Service: nil, + LookupFunction: false, + Role: role} + } if newNeedConnectionPooler { // Try to sync in any case. If we didn't needed connection pooler before, // it means we want to create it. 
If it was already present, still sync // since it could happen that there is no difference in specs, and all // the resources are remembered, but the deployment was manually deleted // in between - c.logger.Debugf("syncing connection pooler for the Spilo role %s", role) // in this case also do not forget to install lookup function as for // creating cluster @@ -724,7 +740,6 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, inst return NoSync, err } } - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { c.logger.Errorf("could not sync connection pooler: %v", err) return reason, err @@ -760,7 +775,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql msg := "Deployment %s for connection pooler synchronization is not found, create it" c.logger.Warningf(msg, c.connectionPoolerName(role)) - deploymentSpec, err := c.generateConnectionPoolerDeployment(role) + deploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) if err != nil { msg = "could not generate deployment for connection pooler: %v" return NoSync, fmt.Errorf(msg, err) @@ -816,7 +831,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if specSync || defaultsSync { c.logger.Infof("Update connection pooler deployment %s, reason: %+v", c.connectionPoolerName(role), reason) - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(role) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) if err != nil { msg := "could not generate deployment for connection pooler: %v" return reason, fmt.Errorf(msg, err) @@ -836,7 +851,11 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations, role) + deployment, err = 
updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations) + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment } service, err := c.KubeClient. @@ -847,7 +866,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql msg := "Service %s for connection pooler synchronization is not found, create it" c.logger.Warningf(msg, c.connectionPoolerName(role)) - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec, role) + serviceSpec := c.generateConnectionPoolerService(c.ConnectionPooler[role]) service, err := c.KubeClient. Services(serviceSpec.Namespace). Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index be5f73de4..fa4443e06 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -928,45 +928,6 @@ func TestPodEnvironmentSecretVariables(t *testing.T) { } -func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { - cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] - if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest { - return fmt.Errorf("CPU request doesn't match, got %s, expected %s", - cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest) - } - - memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"] - if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest { - return fmt.Errorf("Memory request doesn't match, got %s, expected %s", - memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest) - } - - cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"] - if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit { - return fmt.Errorf("CPU limit doesn't match, got %s, expected %s", - cpuLim.String(), 
cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit) - } - - memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"] - if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit { - return fmt.Errorf("Memory limit doesn't match, got %s, expected %s", - memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit) - } - - return nil -} - -func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { - poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] - - if poolerLabels != cluster.connectionPoolerLabelsSelector(role).MatchLabels["connection-pooler"] { - return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", - podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(role).MatchLabels) - } - - return nil -} - func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { required := map[string]bool{ "PGHOST": false, @@ -1012,18 +973,6 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme return nil } -func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { - labels := deployment.Spec.Selector.MatchLabels - expected := cluster.connectionPoolerLabelsSelector(Master).MatchLabels - - if labels["connection-pooler"] != expected["connection-pooler"] { - return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", - labels, expected) - } - - return nil -} - func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { owner := service.ObjectMeta.OwnerReferences[0] @@ -1035,89 +984,6 @@ func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role Post return nil } -func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error { - selector := service.Spec.Selector - - if selector["connection-pooler"] != cluster.connectionPoolerName(role) { - return fmt.Errorf("Selector is incorrect, 
got %s, expected %s", - selector["connection-pooler"], cluster.connectionPoolerName(role)) - } - - return nil -} - -func TestConnectionPoolerServiceSpec(t *testing.T) { - testName := "Test connection pooler service spec generation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error { - return nil - } - - tests := []struct { - subTest string - spec *acidv1.PostgresSpec - cluster *Cluster - check func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error - }{ - { - subTest: "default configuration", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - cluster: cluster, - check: noCheck, - }, - { - subTest: "owner reference", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - cluster: cluster, - check: testServiceOwnwerReference, - }, - { - subTest: "selector", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - EnableReplicaConnectionPooler: boolToPointer(true), - }, - cluster: cluster, - check: testServiceSelector, - }, - } - for _, role := range [2]PostgresRole{Master, Replica} { - for _, tt := range tests { - service := tt.cluster.generateConnectionPoolerService(tt.spec, role) - - if err := tt.check(cluster, service, role); err != nil { - t.Errorf("%s [%s]: Service spec is incorrect, %+v", - testName, tt.subTest, 
err) - } - } - } -} - func TestTLS(t *testing.T) { var err error var spec acidv1.PostgresSpec From e88fa0685d2c5d8cdb57b6b976c248274c1f4bc0 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 27 Oct 2020 09:21:56 +0100 Subject: [PATCH 35/40] minor fix in test --- pkg/cluster/connection_pooler_test.go | 939 ++++++++++++++++++++++++++ 1 file changed, 939 insertions(+) create mode 100644 pkg/cluster/connection_pooler_test.go diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go new file mode 100644 index 000000000..e3848c101 --- /dev/null +++ b/pkg/cluster/connection_pooler_test.go @@ -0,0 +1,939 @@ +package cluster + +import ( + "errors" + "fmt" + "strings" + "testing" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func mockInstallLookupFunction(schema string, user string, role PostgresRole) error { + return nil +} + +func boolToPointer(value bool) *bool { + return &value +} + +func TestConnectionPoolerCreationAndDeletion(t *testing.T) { + testName := "Test connection pooler creation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + }, + }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + + cluster.Spec = 
acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + + reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) + + if err != nil { + t.Errorf("%s: Cannot create connection pooler, %s, %+v", + testName, err, reason) + } + for _, role := range cluster.RolesConnectionPooler() { + if cluster.ConnectionPooler[role] != nil { + if cluster.ConnectionPooler[role].Deployment == nil { + t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) + } + + if cluster.ConnectionPooler[role].Service == nil { + t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) + } + + err = cluster.deleteConnectionPooler(role) + if err != nil { + t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) + } + } + } +} + +func TestNeedConnectionPooler(t *testing.T) { + testName := "Test how connection pooler can be enabled" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !cluster.needMasterConnectionPooler() { + t.Errorf("%s: Connection pooler is not enabled with full definition", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + } + + if !cluster.needMasterConnectionPooler() { + t.Errorf("%s: Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: 
boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if cluster.needMasterConnectionPooler() { + t.Errorf("%s: Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !cluster.needMasterConnectionPooler() { + t.Errorf("%s: Connection pooler is not enabled with flag and full", + testName) + } + + // Test for replica connection pooler + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is not enabled with full definition", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + } + + if !cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !cluster.needReplicaConnectionPooler() { + t.Errorf("%s: Replica Connection pooler is not enabled with flag and full", + testName) + } +} + +func int32ToPointer(value int32) *int32 { + return &value +} + +func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil && + (cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil || + 
*cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) { + return fmt.Errorf("Wrong number of instances") + } + } + return nil +} + +func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + for _, role := range []PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role].Deployment == nil { + return fmt.Errorf("Deployment was not saved %s", role) + } + + if cluster.ConnectionPooler[role].Service == nil { + return fmt.Errorf("Service was not saved %s", role) + } + } + + return nil +} + +func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + if cluster.ConnectionPooler[Master].Deployment == nil { + return fmt.Errorf("Deployment was not saved") + } + + if cluster.ConnectionPooler[Master].Service == nil { + return fmt.Errorf("Service was not saved") + } + + return nil +} + +func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + if cluster.ConnectionPooler[Replica].Deployment == nil { + return fmt.Errorf("Deployment was not saved") + } + + if cluster.ConnectionPooler[Replica].Service == nil { + return fmt.Errorf("Service was not saved") + } + + return nil +} + +func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && + (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { + return fmt.Errorf("Connection pooler was not deleted for role %v", role) + } + } + + return nil +} + +func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { + + if 
cluster.ConnectionPooler[Master] != nil && + (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { + return fmt.Errorf("Connection pooler master was not deleted") + } + return nil +} + +func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { + + if cluster.ConnectionPooler[Replica] != nil && + (cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) { + return fmt.Errorf("Connection pooler replica was not deleted") + } + return nil +} + +func noEmptySync(cluster *Cluster, err error, reason SyncReason) error { + for _, msg := range reason { + if strings.HasPrefix(msg, "update [] from '' to '") { + return fmt.Errorf("There is an empty reason, %s", msg) + } + } + + return nil +} + +func TestConnectionPoolerSynchronization(t *testing.T) { + testName := "Test connection pooler synchronization" + newCluster := func() *Cluster { + return New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + } + cluster := newCluster() + + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + + clusterMissingObjects := newCluster() + clusterMissingObjects.KubeClient = k8sutil.ClientMissingObjects() + + clusterMock := newCluster() + clusterMock.KubeClient = k8sutil.NewMockKubernetesClient() + + clusterDirtyMock := newCluster() + clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() + clusterDirtyMock.ConnectionPooler = 
map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + } + + clusterDirtyMock.ConnectionPooler[Master].Deployment = &appsv1.Deployment{} + clusterDirtyMock.ConnectionPooler[Master].Service = &v1.Service{} + + clusterDirtyMock.ConnectionPooler[Replica].Deployment = &appsv1.Deployment{} + clusterDirtyMock.ConnectionPooler[Replica].Service = &v1.Service{} + + clusterNewDefaultsMock := newCluster() + clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() + + tests := []struct { + subTest string + oldSpec *acidv1.Postgresql + newSpec *acidv1.Postgresql + cluster *Cluster + defaultImage string + defaultInstances int32 + check func(cluster *Cluster, err error, reason SyncReason) error + }{ + { + subTest: "create if doesn't exist", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: clusterMissingObjects, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterobjectsAreSaved, + }, + { + subTest: "create if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: clusterMissingObjects, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterobjectsAreSaved, + }, + { + subTest: "create replica if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: clusterDirtyMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, 
+ check: ReplicaobjectsAreSaved, + }, + { + subTest: "create no replica with flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + }, + }, + cluster: clusterDirtyMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "create from scratch", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: clusterMissingObjects, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterobjectsAreSaved, + }, + { + subTest: "create both master and replica", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: clusterMissingObjects, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreSaved, + }, + { + subTest: "delete if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "delete only master if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyMasterDeleted, + }, + { + subTest: "delete 
only replica if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: clusterDirtyMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyReplicaDeleted, + }, + { + subTest: "cleanup if still there", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + cluster: clusterDirtyMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "update deployment", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(1), + }, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(2), + }, + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: deploymentUpdated, + }, + { + subTest: "update deployment", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(1), + }, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(2), + }, + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: deploymentUpdated, + }, + { + subTest: "update image from changed defaults", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: clusterNewDefaultsMock, + 
defaultImage: "pooler:2.0", + defaultInstances: 2, + check: deploymentUpdated, + }, + { + subTest: "there is no sync from nil to an empty spec", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: nil, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: clusterMock, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: noEmptySync, + }, + } + for _, tt := range tests { + tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage + tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances = + int32ToPointer(tt.defaultInstances) + + reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec, + tt.newSpec, mockInstallLookupFunction) + + if err := tt.check(tt.cluster, err, reason); err != nil { + t.Errorf("%s [%s]: Could not synchronize, %+v", + testName, tt.subTest, err) + } + } +} + +func TestConnectionPoolerPodSpec(t *testing.T) { + testName := "Test connection pooler pod template generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + MaxDBConnections: int32ToPointer(60), + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + var clusterNoDefaultRes = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + 
ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{}, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + clusterNoDefaultRes.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + + noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + expected error + cluster *Cluster + check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: noCheck, + }, + { + subTest: "no default resources", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`), + cluster: clusterNoDefaultRes, + check: noCheck, + }, + { + subTest: "default resources are set", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: testResources, + }, + { + subTest: "labels for service", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testLabels, + }, + { + subTest: "required envs", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: testEnvs, + }, + } + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role) + 
+ if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", + testName, tt.subTest, err, tt.expected) + } + + err = tt.check(cluster, podSpec, role) + if err != nil { + t.Errorf("%s [%s]: Pod spec is incorrect, %+v", + testName, tt.subTest, err) + } + } + } +} + +func TestConnectionPoolerDeploymentSpec(t *testing.T) { + testName := "Test connection pooler deployment spec generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + }, + } + + noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error { + return nil + } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + expected error + cluster *Cluster + check func(cluster *Cluster, deployment *appsv1.Deployment) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: noCheck, + }, + { + subTest: "owner reference", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testDeploymentOwnwerReference, + 
}, + { + subTest: "selector", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testSelector, + }, + } + for _, tt := range tests { + deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master]) + + if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", + testName, tt.subTest, err, tt.expected) + } + + err = tt.check(cluster, deployment) + if err != nil { + t.Errorf("%s [%s]: Deployment spec is incorrect, %+v", + testName, tt.subTest, err) + } + } +} + +func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] + if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest { + return fmt.Errorf("CPU request doesn't match, got %s, expected %s", + cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest) + } + + memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"] + if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest { + return fmt.Errorf("Memory request doesn't match, got %s, expected %s", + memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest) + } + + cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"] + if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit { + return fmt.Errorf("CPU limit doesn't match, got %s, expected %s", + cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit) + } + + memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"] + if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit { + return fmt.Errorf("Memory limit doesn't match, got 
%s, expected %s", + memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit) + } + + return nil +} + +func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] + + if poolerLabels != cluster.connectionPoolerLabelsSelector(cluster.connectionPoolerName(role), role).MatchLabels["connection-pooler"] { + return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", + podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(cluster.connectionPoolerName(role), role).MatchLabels) + } + + return nil +} + +func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { + labels := deployment.Spec.Selector.MatchLabels + expected := cluster.connectionPoolerLabelsSelector(cluster.connectionPoolerName(Master), Master).MatchLabels + + if labels["connection-pooler"] != expected["connection-pooler"] { + return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", + labels, expected) + } + + return nil +} + +func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error { + selector := service.Spec.Selector + + if selector["connection-pooler"] != cluster.connectionPoolerName(role) { + return fmt.Errorf("Selector is incorrect, got %s, expected %s", + selector["connection-pooler"], cluster.connectionPoolerName(role)) + } + + return nil +} + +func TestConnectionPoolerServiceSpec(t *testing.T) { + testName := "Test connection pooler service spec generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, 
k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + + noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error { + return nil + } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + cluster *Cluster + check func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + cluster: cluster, + check: noCheck, + }, + { + subTest: "owner reference", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + cluster: cluster, + check: testServiceOwnwerReference, + }, + { + subTest: "selector", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + cluster: cluster, + check: testServiceSelector, + }, + } + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role]) + + if err := tt.check(cluster, service, role); err != nil { + t.Errorf("%s [%s]: Service spec is incorrect, %+v", + testName, tt.subTest, err) + } + } + } +} From 4354b020fd3c2e73c7f73031a4ad48943e20c7d9 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 27 Oct 2020 10:29:23 +0100 Subject: [PATCH 36/40] another test case fix --- pkg/cluster/connection_pooler_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index e3848c101..a24050594 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -889,6 +889,26 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { Name: "test-sts", }, } + cluster.ConnectionPooler = 
map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + Name: cluster.connectionPoolerName(Master), + ClusterName: cluster.ClusterName, + Namespace: cluster.Namespace, + Role: Master, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + Name: cluster.connectionPoolerName(Replica), + ClusterName: cluster.ClusterName, + Namespace: cluster.Namespace, + Role: Replica, + }, + } noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error { return nil From 2ed0e057c5ac8eb9a94fb69a46c4efca7b34c29e Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 27 Oct 2020 15:42:47 +0100 Subject: [PATCH 37/40] Nitpicks and other changes --- pkg/cluster/connection_pooler.go | 56 +++++++++++---------------- pkg/cluster/connection_pooler_test.go | 8 ++-- 2 files changed, 27 insertions(+), 37 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 2ca636419..576b0c07e 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -14,11 +14,12 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" ) -// K8S objects that are belongs to a connection pooler +// K8S objects that are belong to connection pooler type ConnectionPoolerObjects struct { Deployment *appsv1.Deployment Service *v1.Service @@ -33,8 +34,8 @@ type ConnectionPoolerObjects struct { // this, we can remember the result in memory at least until the next // restart. 
LookupFunction bool - // Careful with referencing cluster.spec this object pointer changes during runtime and lifetime of cluster - // Cluster *cluster + // Careful with referencing cluster.spec this object pointer changes + // during runtime and lifetime of cluster } func (c *Cluster) connectionPoolerName(role PostgresRole) string { @@ -47,7 +48,8 @@ func (c *Cluster) connectionPoolerName(role PostgresRole) string { // isConnectionPoolerEnabled func (c *Cluster) needConnectionPooler() bool { - return needMasterConnectionPoolerWorker(&c.Spec) || needReplicaConnectionPoolerWorker(&c.Spec) + return needMasterConnectionPoolerWorker(&c.Spec) || + needReplicaConnectionPoolerWorker(&c.Spec) } func (c *Cluster) needMasterConnectionPooler() bool { @@ -55,7 +57,8 @@ func (c *Cluster) needMasterConnectionPooler() bool { } func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) + return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || + (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) } func (c *Cluster) needReplicaConnectionPooler() bool { @@ -63,7 +66,8 @@ func (c *Cluster) needReplicaConnectionPooler() bool { } func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - return spec.EnableReplicaConnectionPooler != nil && *spec.EnableReplicaConnectionPooler + return spec.EnableReplicaConnectionPooler != nil && + *spec.EnableReplicaConnectionPooler } // RolesConnectionPooler gives the list of roles which need connection pooler @@ -210,7 +214,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) resources, err := generateResourceRequirements( spec.ConnectionPooler.Resources, - c.makeDefaultConnectionPoolerResources()) + 
makeDefaultConnectionPoolerResources(&c.OpConfig)) effectiveDockerImage := util.Coalesce( spec.ConnectionPooler.DockerImage, @@ -320,8 +324,8 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio if spec.ConnectionPooler == nil { spec.ConnectionPooler = &acidv1.ConnectionPooler{} } - podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role) + numberOfInstances := spec.ConnectionPooler.NumberOfInstances if numberOfInstances == nil { numberOfInstances = util.CoalesceInt32( @@ -343,7 +347,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: connectionPooler.Name, - Namespace: c.Namespace, + Namespace: connectionPooler.Namespace, Labels: c.connectionPoolerLabelsSelector(connectionPooler.Name, connectionPooler.Role).MatchLabels, Annotations: map[string]string{}, // make StatefulSet object its owner to represent the dependency. @@ -597,7 +601,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler } expectedResources, err := generateResourceRequirements(spec.Resources, - c.makeDefaultConnectionPoolerResources()) + makeDefaultConnectionPoolerResources(&c.OpConfig)) // An error to generate expected resources means something is not quite // right, but for the purpose of robustness do not panic here, just report @@ -639,8 +643,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler // Generate default resource section for connection pooler deployment, to be // used if nothing custom is specified in the manifest -func (c Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := c.OpConfig +func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resources { defaultRequests := acidv1.ResourceDescription{ CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, @@ -679,34 +682,21 @@ func (c *Cluster) 
syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, inst } } + // if the call is via createConnectionPooler, then it is required to initialize + // the structure if c.ConnectionPooler == nil { - c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { - Deployment: nil, - Service: nil, - Name: c.connectionPoolerName(Master), - ClusterName: c.ClusterName, - Namespace: c.Namespace, - LookupFunction: false, - Role: Master, - }, - Replica: { - Deployment: nil, - Service: nil, - Name: c.connectionPoolerName(Replica), - ClusterName: c.ClusterName, - Namespace: c.Namespace, - LookupFunction: false, - Role: Replica, - }, - } + c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} } if c.ConnectionPooler[role] == nil { c.ConnectionPooler[role] = &ConnectionPoolerObjects{ Deployment: nil, Service: nil, + Name: c.connectionPoolerName(role), + ClusterName: c.ClusterName, + Namespace: c.Namespace, LookupFunction: false, - Role: role} + Role: role, + } } if newNeedConnectionPooler { // Try to sync in any case. 
If we didn't needed connection pooler before, diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index a24050594..7050e4ef3 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -23,6 +23,10 @@ func boolToPointer(value bool) *bool { return &value } +func int32ToPointer(value int32) *int32 { + return &value +} + func TestConnectionPoolerCreationAndDeletion(t *testing.T) { testName := "Test connection pooler creation" var cluster = New( @@ -175,10 +179,6 @@ func TestNeedConnectionPooler(t *testing.T) { } } -func int32ToPointer(value int32) *int32 { - return &value -} - func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { for _, role := range [2]PostgresRole{Master, Replica} { if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil && From 07bb270a5590a0a747a1e76ca6be0f3c8806d63c Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Tue, 27 Oct 2020 17:03:34 +0100 Subject: [PATCH 38/40] cleanup and avoid using cluster in more functions --- pkg/cluster/cluster.go | 2 +- pkg/cluster/connection_pooler.go | 46 ++++++++++++--------------- pkg/cluster/connection_pooler_test.go | 18 +++++------ pkg/cluster/sync.go | 2 +- 4 files changed, 31 insertions(+), 37 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index e671355d1..ba8018e25 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -899,7 +899,7 @@ func (c *Cluster) initSystemUsers() { // Connection pooler user is an exception, if requested it's going to be // created by operator as a normal pgUser - if c.needConnectionPooler() { + if needConnectionPooler(&c.Spec) { // initialize empty connection pooler if not done yet if c.Spec.ConnectionPooler == nil { c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{} diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 576b0c07e..a78ec5f3f 100644 --- 
a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -3,8 +3,10 @@ package cluster import ( "context" "fmt" + "strings" "github.com/r3labs/diff" + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -47,13 +49,13 @@ func (c *Cluster) connectionPoolerName(role PostgresRole) string { } // isConnectionPoolerEnabled -func (c *Cluster) needConnectionPooler() bool { - return needMasterConnectionPoolerWorker(&c.Spec) || - needReplicaConnectionPoolerWorker(&c.Spec) +func needConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needMasterConnectionPoolerWorker(spec) || + needReplicaConnectionPoolerWorker(spec) } -func (c *Cluster) needMasterConnectionPooler() bool { - return needMasterConnectionPoolerWorker(&c.Spec) +func needMasterConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needMasterConnectionPoolerWorker(spec) } func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { @@ -61,8 +63,8 @@ func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) } -func (c *Cluster) needReplicaConnectionPooler() bool { - return needReplicaConnectionPoolerWorker(&c.Spec) +func needReplicaConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needReplicaConnectionPoolerWorker(spec) } func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { @@ -70,19 +72,6 @@ func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { *spec.EnableReplicaConnectionPooler } -// RolesConnectionPooler gives the list of roles which need connection pooler -func (c *Cluster) RolesConnectionPooler() []PostgresRole { - roles := make([]PostgresRole, 2) - - if needMasterConnectionPoolerWorker(&c.Spec) { - roles = append(roles, Master) - } - if needMasterConnectionPoolerWorker(&c.Spec) { - roles = 
append(roles, Replica) - } - return roles -} - // Return connection pooler labels selector, which should from one point of view // inherit most of the labels from the cluster itself, but at the same time // have e.g. different `application` label, so that recreatePod operation will @@ -570,12 +559,12 @@ func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (s // Check if we need to synchronize connection pooler deployment due to new // defaults, that are different from what we see in the DeploymentSpec -func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { +func needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { reasons = []string{} sync = false - config := c.OpConfig.ConnectionPooler + config := Config.OpConfig.ConnectionPooler podTemplate := deployment.Spec.Template poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] @@ -601,7 +590,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler } expectedResources, err := generateResourceRequirements(spec.Resources, - makeDefaultConnectionPoolerResources(&c.OpConfig)) + makeDefaultConnectionPoolerResources(&Config.OpConfig)) // An error to generate expected resources means something is not quite // right, but for the purpose of robustness do not panic here, just report @@ -615,14 +604,19 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(spec *acidv1.ConnectionPooler } if err != nil { - c.logger.Warningf("Cannot generate expected resources, %v", err) + return false, reasons } for _, env := range poolerContainer.Env { if spec.User == "" && env.Name == "PGUSER" { ref := env.ValueFrom.SecretKeyRef.LocalObjectReference + secretName := Config.OpConfig.SecretNameTemplate.Format( + "username", strings.Replace(config.User, "_", "-", -1), + "cluster", 
deployment.ClusterName, + "tprkind", acidv1.PostgresCRDResourceKind, + "tprgroup", acidzalando.GroupName) - if ref.Name != c.credentialSecretName(config.User) { + if ref.Name != secretName { sync = true msg := fmt.Sprintf("pooler user is different (having %s, required %s)", ref.Name, config.User) @@ -815,7 +809,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) } - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) + defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) reason := append(specReason, defaultsReason...) if specSync || defaultsSync { diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index 7050e4ef3..4f3f27176 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -64,7 +64,7 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) { t.Errorf("%s: Cannot create connection pooler, %s, %+v", testName, err, reason) } - for _, role := range cluster.RolesConnectionPooler() { + for _, role := range [2]PostgresRole{Master, Replica} { if cluster.ConnectionPooler[role] != nil { if cluster.ConnectionPooler[role].Deployment == nil { t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) @@ -105,7 +105,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if !cluster.needMasterConnectionPooler() { + if !needMasterConnectionPooler(&cluster.Spec) { t.Errorf("%s: Connection pooler is not enabled with full definition", testName) } @@ -114,7 +114,7 @@ func TestNeedConnectionPooler(t *testing.T) { EnableConnectionPooler: boolToPointer(true), } - if !cluster.needMasterConnectionPooler() { + if !needMasterConnectionPooler(&cluster.Spec) { t.Errorf("%s: Connection pooler is not 
enabled with flag", testName) } @@ -124,7 +124,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if cluster.needMasterConnectionPooler() { + if needMasterConnectionPooler(&cluster.Spec) { t.Errorf("%s: Connection pooler is still enabled with flag being false", testName) } @@ -134,7 +134,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if !cluster.needMasterConnectionPooler() { + if !needMasterConnectionPooler(&cluster.Spec) { t.Errorf("%s: Connection pooler is not enabled with flag and full", testName) } @@ -144,7 +144,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if cluster.needReplicaConnectionPooler() { + if needReplicaConnectionPooler(&cluster.Spec) { t.Errorf("%s: Replica Connection pooler is not enabled with full definition", testName) } @@ -153,7 +153,7 @@ func TestNeedConnectionPooler(t *testing.T) { EnableReplicaConnectionPooler: boolToPointer(true), } - if !cluster.needReplicaConnectionPooler() { + if !needReplicaConnectionPooler(&cluster.Spec) { t.Errorf("%s: Replica Connection pooler is not enabled with flag", testName) } @@ -163,7 +163,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if cluster.needReplicaConnectionPooler() { + if needReplicaConnectionPooler(&cluster.Spec) { t.Errorf("%s: Replica Connection pooler is still enabled with flag being false", testName) } @@ -173,7 +173,7 @@ func TestNeedConnectionPooler(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, } - if !cluster.needReplicaConnectionPooler() { + if !needReplicaConnectionPooler(&cluster.Spec) { t.Errorf("%s: Replica Connection pooler is not enabled with flag and full", testName) } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 49408fbf7..8961fa80d 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -560,7 +560,7 @@ func (c *Cluster) syncRoles() 
(err error) { userNames = append(userNames, u.Name) } - if c.needMasterConnectionPooler() || c.needReplicaConnectionPooler() { + if needMasterConnectionPooler(&c.Spec) || needReplicaConnectionPooler(&c.Spec) { connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName] userNames = append(userNames, connectionPoolerUser.Name) From 49613bd44e8e115115c37a3c5547b8ea697815ad Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Thu, 29 Oct 2020 09:29:26 +0100 Subject: [PATCH 39/40] fix --- pkg/cluster/cluster.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index cf918d099..6c1505b3c 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -757,7 +757,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // need to process. In the future we may want to do this more careful and // check which databases we need to process, but even repeating the whole // installation process should be good enough. - c.ConnectionPooler.LookupFunction = false if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil { From 7b4e85049bc2bc4b0fef128ea9d83582991c2156 Mon Sep 17 00:00:00 2001 From: Rafia Sabih Date: Thu, 29 Oct 2020 11:57:17 +0100 Subject: [PATCH 40/40] minor fix --- pkg/cluster/connection_pooler.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index a78ec5f3f..8fedf48ed 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -406,7 +406,6 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo //delete connection pooler func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { - //c.setProcessName("deleting connection pooler") c.logger.Debugln("deleting connection pooler") // Lack of connection pooler objects is not a fatal error, just log it if