Support inherited annotations for all major objects #1236

Merged
merged 18 commits into from Dec 11, 2020
4 changes: 4 additions & 0 deletions charts/postgres-operator/crds/operatorconfigurations.yaml
@@ -166,6 +166,10 @@ spec:
type: string
template:
type: boolean
inherited_annotations:
type: array
items:
type: string
inherited_labels:
type: array
items:
6 changes: 5 additions & 1 deletion charts/postgres-operator/values-crd.yaml
@@ -89,7 +89,11 @@ configKubernetes:
# namespaced name of the secret containing infrastructure roles names and passwords
# infrastructure_roles_secret_name: postgresql-infrastructure-roles

# list of labels that can be inherited from the cluster manifest
# list of annotation keys that can be inherited from the cluster manifest
# inherited_annotations:
# - owned-by

# list of label keys that can be inherited from the cluster manifest
# inherited_labels:
# - application
# - environment
5 changes: 4 additions & 1 deletion charts/postgres-operator/values.yaml
@@ -86,7 +86,10 @@ configKubernetes:
# namespaced name of the secret containing infrastructure roles names and passwords
# infrastructure_roles_secret_name: postgresql-infrastructure-roles

# list of labels that can be inherited from the cluster manifest
# list of annotation keys that can be inherited from the cluster manifest
# inherited_annotations: owned-by

# list of label keys that can be inherited from the cluster manifest
# inherited_labels: application,environment

# timeout for successful migration of master pods from unschedulable node
21 changes: 14 additions & 7 deletions docs/reference/operator_parameters.md
@@ -271,6 +271,12 @@ configuration they are grouped under the `kubernetes` key.
are extracted. For the ConfigMap this has to be a string which allows
referencing only one infrastructure roles secret. The default is empty.

* **inherited_annotations**
list of annotation keys that can be inherited from the cluster manifest and
added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PDB` and
`Service`) created by the operator, including those of the connection
pooler deployment. The default is empty.

* **pod_role_label**
name of the label assigned to the Postgres pods (and services/endpoints) by
the operator. The default is `spilo-role`.
@@ -280,15 +286,16 @@ configuration they are grouped under the `kubernetes` key.
objects. The default is `application:spilo`.

* **inherited_labels**
list of labels that can be inherited from the cluster manifest, and added to
each child objects (`StatefulSet`, `Pod`, `Service` and `Endpoints`) created
by the operator. Typical use case is to dynamically pass labels that are
specific to a given Postgres cluster, in order to implement `NetworkPolicy`.
The default is empty.
list of label keys that can be inherited from the cluster manifest, and
added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PVCs`,
`PDB`, `Service`, `Endpoints` and `Secrets`) created by the operator.
A typical use case is to dynamically pass labels that are specific to a
given Postgres cluster, in order to implement `NetworkPolicy`. The default
is empty.

* **cluster_name_label**
name of the label assigned to Kubernetes objects created by the operator that
indicates which cluster a given object belongs to. The default is
name of the label assigned to Kubernetes objects created by the operator
that indicates which cluster a given object belongs to. The default is
`cluster-name`.

* **node_readiness_label**
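To make the documented behaviour concrete: the operator copies onto child objects only those manifest annotations whose keys are listed in `inherited_annotations`. A minimal sketch of that selection step in Go — the helper name and signature are illustrative and not part of the operator's code:

```go
package main

import "fmt"

// pickInherited returns the subset of the cluster manifest's annotations
// whose keys appear in the configured inherited_annotations list.
// Hypothetical helper for illustration only.
func pickInherited(manifestAnnotations map[string]string, inheritedKeys []string) map[string]string {
	result := map[string]string{}
	for _, key := range inheritedKeys {
		if value, ok := manifestAnnotations[key]; ok {
			result[key] = value
		}
	}
	return result
}

func main() {
	manifest := map[string]string{
		"owned-by":      "acid",
		"internal-note": "not propagated",
	}
	// With inherited_annotations: ["owned-by"], only that key survives.
	fmt.Println(pickInherited(manifest, []string{"owned-by"}))
	// Output: map[owned-by:acid]
}
```

The same selection idea applies to `inherited_labels`, only over the manifest's labels instead of its annotations.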
4 changes: 2 additions & 2 deletions e2e/tests/k8s_api.py
@@ -117,7 +117,7 @@ def check_service_annotations(self, svc_labels, annotations, namespace='default'
        for svc in svcs:
            for key, value in annotations.items():
                if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value:
                    print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotations))
                    print("Expected key {} not found in service annotations {}".format(key, svc.metadata.annotations))
                    return False
        return True

@@ -126,7 +126,7 @@ def check_statefulset_annotations(self, sset_labels, annotations, namespace='def
        for sset in ssets:
            for key, value in annotations.items():
                if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value:
                    print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotations))
                    print("Expected key {} not found in statefulset annotations {}".format(key, sset.metadata.annotations))
                    return False
        return True

7 changes: 4 additions & 3 deletions e2e/tests/test_e2e.py
@@ -852,6 +852,7 @@ def test_statefulset_annotation_propagation(self):
        patch_sset_propagate_annotations = {
            "data": {
                "downscaler_annotations": "deployment-time,downscaler/*",
                "inherited_annotations": "owned-by",
            }
        }
        k8s.update_config(patch_sset_propagate_annotations)
@@ -861,6 +862,7 @@
                "annotations": {
                    "deployment-time": "2020-04-30 12:00:00",
                    "downscaler/downtime_replicas": "0",
                    "owned-by": "acid",
                },
            }
        }
@@ -870,10 +872,9 @@
        annotations = {
            "deployment-time": "2020-04-30 12:00:00",
            "downscaler/downtime_replicas": "0",
            "owned-by": "acid",
        }

        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")

        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
1 change: 1 addition & 0 deletions manifests/configmap.yaml
@@ -54,6 +54,7 @@ data:
# kubernetes_use_configmaps: "false"
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
# inherited_annotations: owned-by
# inherited_labels: application,environment
# kube_iam_role: ""
# log_s3_bucket: ""
4 changes: 4 additions & 0 deletions manifests/operatorconfiguration.crd.yaml
@@ -162,6 +162,10 @@ spec:
type: string
template:
type: boolean
inherited_annotations:
type: array
items:
type: string
inherited_labels:
type: array
items:
2 changes: 2 additions & 0 deletions manifests/postgresql-operator-default-configuration.yaml
@@ -49,6 +49,8 @@ configuration:
# - secretname: "other-infrastructure-role"
# userkey: "other-user-key"
# passwordkey: "other-password-key"
# inherited_annotations:
# - owned-by
# inherited_labels:
# - application
# - environment
10 changes: 9 additions & 1 deletion pkg/apis/acid.zalan.do/v1/crds.go
@@ -963,6 +963,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
						},
					},
				},
				"inherited_annotations": {
					Type: "array",
					Items: &apiextv1.JSONSchemaPropsOrArray{
						Schema: &apiextv1.JSONSchemaProps{
							Type: "string",
						},
					},
				},
				"inherited_labels": {
					Type: "array",
					Items: &apiextv1.JSONSchemaPropsOrArray{
@@ -1400,7 +1408,7 @@ func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourc
			},
			Scope: apiextv1.NamespaceScoped,
			Versions: []apiextv1.CustomResourceDefinitionVersion{
				apiextv1.CustomResourceDefinitionVersion{
				{
					Name: SchemeGroupVersion.Version,
					Served: true,
					Storage: true,
3 changes: 2 additions & 1 deletion pkg/apis/acid.zalan.do/v1/operator_configuration_type.go
@@ -66,6 +66,7 @@ type KubernetesMetaConfiguration struct {
	PodRoleLabel string `json:"pod_role_label,omitempty"`
	ClusterLabels map[string]string `json:"cluster_labels,omitempty"`
	InheritedLabels []string `json:"inherited_labels,omitempty"`
	InheritedAnnotations []string `json:"inherited_annotations,omitempty"`
	DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"`
	ClusterNameLabel string `json:"cluster_name_label,omitempty"`
	DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"`
@@ -167,7 +168,7 @@ type ScalyrConfiguration struct {
	ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
}

// Defines default configuration for connection pooler
// ConnectionPoolerConfiguration defines default configuration for connection pooler
type ConnectionPoolerConfiguration struct {
	NumberOfInstances *int32 `json:"connection_pooler_number_of_instances,omitempty"`
	Schema string `json:"connection_pooler_schema,omitempty"`
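As the manifests above show, the option comes in two shapes: a YAML list in the CRD-based `OperatorConfiguration` (`inherited_annotations: [owned-by]`) and a comma-separated string in the ConfigMap (`inherited_annotations: owned-by`), both ending up in the `InheritedAnnotations []string` field. A rough sketch of turning the ConfigMap form into that slice — the operator has its own ConfigMap parsing code, so this is illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// splitList turns a comma-separated ConfigMap value such as "owned-by,team"
// into a string slice. Illustrative sketch, not the operator's parser.
func splitList(raw string) []string {
	if strings.TrimSpace(raw) == "" {
		return nil
	}
	parts := strings.Split(raw, ",")
	result := make([]string, 0, len(parts))
	for _, part := range parts {
		result = append(result, strings.TrimSpace(part))
	}
	return result
}

func main() {
	fmt.Println(splitList("owned-by,team")) // [owned-by team]
}
```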
15 changes: 10 additions & 5 deletions pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

10 changes: 5 additions & 5 deletions pkg/cluster/connection_pooler.go
@@ -286,7 +286,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
		ObjectMeta: metav1.ObjectMeta{
			Labels: c.connectionPoolerLabels(role, true).MatchLabels,
			Namespace: c.Namespace,
			Annotations: c.generatePodAnnotations(spec),
			Annotations: c.annotationsSet(c.generatePodAnnotations(spec)),
		},
		Spec: v1.PodSpec{
			ServiceAccountName: c.OpConfig.PodServiceAccountName,
@@ -325,7 +325,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio

	if *numberOfInstances < constants.ConnectionPoolerMinInstances {
		msg := "Adjusted number of connection pooler instances from %d to %d"
		c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances)
		c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances)

		*numberOfInstances = constants.ConnectionPoolerMinInstances
	}
@@ -339,7 +339,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
			Name: connectionPooler.Name,
			Namespace: connectionPooler.Namespace,
			Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels,
			Annotations: map[string]string{},
			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
			// make StatefulSet object its owner to represent the dependency.
			// By itself StatefulSet is being deleted with "Orphaned"
			// propagation policy, which means that it's deletion will not
@@ -390,7 +390,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
			Name: connectionPooler.Name,
			Namespace: connectionPooler.Namespace,
			Labels: c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels,
			Annotations: map[string]string{},
			Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)),
			// make StatefulSet object its owner to represent the dependency.
			// By itself StatefulSet is being deleted with "Orphaned"
			// propagation policy, which means that it's deletion will not
@@ -866,7 +866,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
		}
	}

	newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations)
	newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations))
	if newAnnotations != nil {
		deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations)
		if err != nil {
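The pooler deployment's annotations are now built as `c.AnnotationsToPropagate(c.annotationsSet(...))`. `AnnotationsToPropagate` predates this PR and its body is not part of the diff; judging from the configuration options and the e2e test above, it appears to keep only annotations the operator is allowed to sync onto child objects: keys matching `downscaler_annotations` (which may end in a `*` wildcard) and, with this change, the `inherited_annotations` keys. A sketch under those assumptions, with hypothetical names:

```go
package main

import (
	"fmt"
	"strings"
)

// annotationsToPropagate keeps only annotations whose keys match a
// downscaler_annotations entry (exact or trailing-"*" prefix match) or are
// listed in inherited_annotations. Assumed behaviour, not the operator's code.
func annotationsToPropagate(all map[string]string, downscaler, inherited []string) map[string]string {
	keep := map[string]string{}
	for key, value := range all {
		for _, pattern := range downscaler {
			if key == pattern ||
				(strings.HasSuffix(pattern, "*") && strings.HasPrefix(key, strings.TrimSuffix(pattern, "*"))) {
				keep[key] = value
			}
		}
		for _, inheritedKey := range inherited {
			if key == inheritedKey {
				keep[key] = value
			}
		}
	}
	if len(keep) == 0 {
		return nil
	}
	return keep
}

func main() {
	all := map[string]string{
		"downscaler/downtime_replicas": "0",
		"owned-by":                     "acid",
		"unrelated":                    "dropped",
	}
	fmt.Println(annotationsToPropagate(all, []string{"deployment-time", "downscaler/*"}, []string{"owned-by"}))
	// Prints the downscaler and owned-by entries; "unrelated" is dropped.
}
```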
34 changes: 19 additions & 15 deletions pkg/cluster/k8sres.go
@@ -1174,13 +1174,13 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
	effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)

	annotations := c.generatePodAnnotations(spec)
	podAnnotations := c.generatePodAnnotations(spec)

	// generate pod template for the statefulset, based on the spilo container and sidecars
	podTemplate, err = c.generatePodTemplate(
		c.Namespace,
		c.labelsSet(true),
		annotations,
		c.annotationsSet(podAnnotations),
		spiloContainer,
		initContainers,
		sidecarContainers,
@@ -1226,15 +1226,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
	}

	annotations = make(map[string]string)
	annotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
	stsAnnotations := make(map[string]string)
	stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
	stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))

	statefulSet := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: c.statefulSetName(),
			Namespace: c.Namespace,
			Labels: c.labelsSet(true),
			Annotations: c.AnnotationsToPropagate(annotations),
			Annotations: stsAnnotations,
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: &numberOfInstances,
@@ -1527,9 +1528,10 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
	username := pgUser.Name
	secret := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: c.credentialSecretName(username),
			Namespace: namespace,
			Labels: c.labelsSet(true),
			Name: c.credentialSecretName(username),
			Namespace: namespace,
			Labels: c.labelsSet(true),
			Annotations: c.annotationsSet(nil),
		},
		Type: v1.SecretTypeOpaque,
		Data: map[string][]byte{
@@ -1603,7 +1605,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
			Name: c.serviceName(role),
			Namespace: c.Namespace,
			Labels: c.roleLabelsSet(true, role),
			Annotations: c.generateServiceAnnotations(role, spec),
			Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)),
		},
		Spec: serviceSpec,
	}
@@ -1806,9 +1808,10 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget

	return &policybeta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name: c.podDisruptionBudgetName(),
			Namespace: c.Namespace,
			Labels: c.labelsSet(true),
			Name: c.podDisruptionBudgetName(),
			Namespace: c.Namespace,
			Labels: c.labelsSet(true),
			Annotations: c.annotationsSet(nil),
		},
		Spec: policybeta1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
@@ -1928,9 +1931,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {

	cronJob := &batchv1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name: c.getLogicalBackupJobName(),
			Namespace: c.Namespace,
			Labels: c.labelsSet(true),
			Name: c.getLogicalBackupJobName(),
			Namespace: c.Namespace,
			Labels: c.labelsSet(true),
			Annotations: c.annotationsSet(nil),
		},
		Spec: batchv1beta1.CronJobSpec{
			Schedule: schedule,
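Most call sites above share one pattern: `c.annotationsSet(x)` presumably merges the inherited annotations (taken from the cluster manifest and filtered by `inherited_annotations`) into whatever map `x` already holds, which is why objects that previously carried no annotations — the per-user `Secret`, the `PodDisruptionBudget` and the logical backup `CronJob` — can simply pass `nil`. A minimal sketch of that assumed merge, with the inherited map passed explicitly so the example is self-contained:

```go
package main

import "fmt"

// annotationsSetSketch merges inherited annotations over a base map. The
// real c.annotationsSet is a Cluster method that derives the inherited keys
// itself; this stand-alone version only illustrates the assumed semantics.
func annotationsSetSketch(base, inherited map[string]string) map[string]string {
	merged := map[string]string{}
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range inherited {
		merged[k] = v
	}
	return merged
}

func main() {
	inherited := map[string]string{"owned-by": "acid"}

	// Service: inherited keys are layered on top of the generated service annotations.
	fmt.Println(annotationsSetSketch(map[string]string{"custom/annotation": "example"}, inherited))

	// PDB, Secret, CronJob: a nil base still yields the inherited keys.
	fmt.Println(annotationsSetSketch(nil, inherited))
	// Output: map[owned-by:acid]
}
```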