From 5314db668fd90371086a70ae55b7640ce39eed22 Mon Sep 17 00:00:00 2001 From: Abhishek Dasgupta Date: Sat, 25 Sep 2021 16:00:13 +0530 Subject: [PATCH] Enabled jobs instead of cronjobs to take care of running compaction. --- api/v1alpha1/etcd_types.go | 4 + .../templates/etcd-compaction-cronjob.yaml | 241 --- charts/etcd/values.yaml | 12 +- .../crd/bases/druid.gardener.cloud_etcds.yaml | 28 +- config/rbac/role.yaml | 24 + controllers/config/compaction.go | 23 + controllers/controller_ref_manager.go | 237 ++- controllers/controllers_suite_test.go | 29 +- controllers/etcd_controller.go | 537 ++--- controllers/etcd_controller_test.go | 1800 ++++++++--------- controllers/lease_controller.go | 456 +++++ main.go | 48 +- .../etcd-druid/api/v1alpha1/etcd_types.go | 4 + 13 files changed, 1820 insertions(+), 1623 deletions(-) delete mode 100644 charts/etcd/templates/etcd-compaction-cronjob.yaml create mode 100644 controllers/config/compaction.go create mode 100644 controllers/lease_controller.go diff --git a/api/v1alpha1/etcd_types.go b/api/v1alpha1/etcd_types.go index 12a44e874..8e3fad97b 100644 --- a/api/v1alpha1/etcd_types.go +++ b/api/v1alpha1/etcd_types.go @@ -120,6 +120,10 @@ type BackupSpec struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // CompactionResources defines the compute Resources required by compaction job. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + CompactionResources *corev1.ResourceRequirements `json:"compactionResources,omitempty"` // FullSnapshotSchedule defines the cron standard schedule for full snapshots. // +optional FullSnapshotSchedule *string `json:"fullSnapshotSchedule,omitempty"` diff --git a/charts/etcd/templates/etcd-compaction-cronjob.yaml b/charts/etcd/templates/etcd-compaction-cronjob.yaml deleted file mode 100644 index cf9eabd7e..000000000 --- a/charts/etcd/templates/etcd-compaction-cronjob.yaml +++ /dev/null @@ -1,241 +0,0 @@ -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: {{ .Values.cronJobName }} - namespace: {{ .Release.Namespace }} - annotations: - gardener.cloud/owned-by: "{{ .Release.Namespace }}/{{ .Values.name }}" - gardener.cloud/owner-type: "etcd" -{{- if .Values.annotations }} -{{ toYaml .Values.annotations | indent 4 }} -{{- end }} - labels: - name: etcd - instance: {{ .Values.name }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 4 }} -{{- end }} - ownerReferences: - - apiVersion: druid.gardener.cloud/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: Etcd - name: {{ .Values.name }} - uid: {{ .Values.uid }} -spec: - schedule: {{ .Values.backup.backupCompactionSchedule }} - concurrencyPolicy: Forbid -{{- if eq (int $.Values.statefulsetReplicas) 0 }} - suspend: true -{{- else }} - suspend: false -{{- end }} - jobTemplate: - spec: - backoffLimit: 0 - template: - metadata: - annotations: - checksum/etcd-configmap: {{ include (print $.Template.BasePath "/etcd-configmap.yaml") . 
| sha256sum }} -{{- if .Values.annotations }} -{{ toYaml .Values.annotations | indent 12 }} -{{- end }} - labels: - name: etcd - instance: {{ .Values.name }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 12 }} -{{- end }} - spec: - restartPolicy: Never - containers: - - name: compact-backup - image: {{ .Values.backup.image }} - imagePullPolicy: {{ .Values.backup.pullPolicy }} - command: - - etcdbrctl - - compact - - --data-dir=/var/etcd/data - - --snapstore-temp-directory=/var/etcd/data/tmp -{{- if .Values.store.storageProvider }} - - --storage-provider={{ .Values.store.storageProvider }} -{{- end }} -{{- if .Values.store.storePrefix }} - - --store-prefix={{ .Values.store.storePrefix }} -{{- end }} -{{- if .Values.backup.etcdQuotaBytes }} - - --embedded-etcd-quota-bytes={{ int $.Values.backup.etcdQuotaBytes }} -{{- end }} -{{- if .Values.store.storageContainer }} - - --store-container={{ .Values.store.storageContainer }} -{{- end }} -{{- if .Values.backup.etcdSnapshotTimeout }} - - --etcd-snapshot-timeout={{ .Values.backup.etcdSnapshotTimeout }} -{{- end }} -{{- if .Values.etcd.etcdDefragTimeout }} - - --etcd-defrag-timeout={{ .Values.etcd.etcdDefragTimeout }} -{{- end }} - ports: - - containerPort: {{ .Values.backup.port }} - name: server - protocol: TCP - resources: -{{- if .Values.backup.enableBackupCompactionJobTempFS }} -{{ toYaml .Values.backup.compactionResourcesTempFS | indent 14 }} -{{- else }} -{{ toYaml .Values.backup.compactionResources | indent 14 }} -{{- end }} - env: - - name: STORAGE_CONTAINER - value: {{ .Values.store.storageContainer }} -{{- if eq .Values.store.storageProvider "S3" }} - - name: "AWS_REGION" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "region" - - name: "AWS_SECRET_ACCESS_KEY" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "secretAccessKey" - - name: "AWS_ACCESS_KEY_ID" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "accessKeyID" -{{- else if eq .Values.store.storageProvider "ABS" }} - - name: "STORAGE_ACCOUNT" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "storageAccount" - - name: "STORAGE_KEY" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "storageKey" -{{- else if eq .Values.store.storageProvider "GCS" }} - - name: "GOOGLE_APPLICATION_CREDENTIALS" - value: "/root/.gcp/serviceaccount.json" -{{- else if eq .Values.store.storageProvider "Swift" }} - - name: "OS_AUTH_URL" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "authURL" - - name: "OS_DOMAIN_NAME" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "domainName" - - name: "OS_USERNAME" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "username" - - name: "OS_PASSWORD" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "password" - - name: "OS_TENANT_NAME" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "tenantName" -{{- else if eq .Values.store.storageProvider "OSS" }} - - name: "ALICLOUD_ENDPOINT" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "storageEndpoint" - - name: "ALICLOUD_ACCESS_KEY_SECRET" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "accessKeySecret" - - name: "ALICLOUD_ACCESS_KEY_ID" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "accessKeyID" -{{- else if eq .Values.store.storageProvider "ECS" }} - - name: 
"ECS_ENDPOINT" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "endpoint" - - name: "ECS_ACCESS_KEY_ID" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "accessKeyID" - - name: "ECS_SECRET_ACCESS_KEY" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "secretAccessKey" -{{- else if eq .Values.store.storageProvider "OCS" }} - - name: "OCS_ENDPOINT" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "endpoint" - - name: "OCS_ACCESS_KEY_ID" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "accessKeyID" - - name: "OCS_SECRET_ACCESS_KEY" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "secretAccessKey" - - name: "OCS_REGION" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "region" - - name: "OCS_DISABLE_SSL" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "disableSSL" - optional: true - - name: "OCS_INSECURE_SKIP_VERIFY" - valueFrom: - secretKeyRef: - name: {{ .Values.store.storeSecret }} - key: "insecureSkipVerify" - optional: true -{{- end }} - volumeMounts: - - name: etcd-workspace-dir - mountPath: /var/etcd/data - - name: etcd-config-file - mountPath: /var/etcd/config/ -{{- if eq .Values.store.storageProvider "GCS" }} - - name: etcd-backup - mountPath: "/root/.gcp/" -{{- end }} - volumes: - - name: etcd-workspace-dir -{{- if .Values.backup.enableBackupCompactionJobTempFS }} - emptyDir: - medium: Memory -{{- else }} - emptyDir: {} -{{- end }} - - name: etcd-config-file - configMap: - name: {{ .Values.configMapName }} - defaultMode: 0644 - items: - - key: etcd.conf.yaml - path: etcd.conf.yaml -{{- if eq .Values.store.storageProvider "GCS" }} - - name: etcd-backup - secret: - secretName: {{ .Values.store.storeSecret }} -{{- end }} \ No newline at end of file diff --git a/charts/etcd/values.yaml b/charts/etcd/values.yaml index 8da1ed534..8c7253cb0 100644 --- a/charts/etcd/values.yaml +++ b/charts/etcd/values.yaml @@ -2,6 +2,7 @@ name: test uid: uuid-of-etcd-resource serviceName: test configMapName: test +jobName: test replicas: 1 #priorityClassName: foo @@ -51,17 +52,10 @@ backup: compactionResources: limits: cpu: 700m - memory: 3Gi + memory: 4Gi requests: cpu: 500m - memory: 2Gi - compactionResourcesTempFS: - limits: - cpu: 900m - memory: 12Gi - requests: - cpu: 700m - memory: 10Gi + memory: 3Gi # compression: # enabled: true # policy: "gzip" diff --git a/config/crd/bases/druid.gardener.cloud_etcds.yaml b/config/crd/bases/druid.gardener.cloud_etcds.yaml index 604f9a49f..2186589e5 100644 --- a/config/crd/bases/druid.gardener.cloud_etcds.yaml +++ b/config/crd/bases/druid.gardener.cloud_etcds.yaml @@ -49,9 +49,28 @@ spec: backupCompactionSchedule: description: BackupCompactionSchedule defines the cron standard for compacting the snapstore type: string - etcdSnapshotTimeout: - description: EtcdSnapshotTimeout defines the timeout duration for etcd FullSnapshot operation - type: string + compactionResources: + description: 'CompactionResources defines the compute Resources required by compaction job. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object compression: description: SnapshotCompression defines the specification for compression of Snapshots. properties: @@ -78,6 +97,9 @@ spec: enableProfiling: description: EnableProfiling defines if profiling should be enabled for the etcd-backup-restore-sidecar type: boolean + etcdSnapshotTimeout: + description: EtcdSnapshotTimeout defines the timeout duration for etcd FullSnapshot operation + type: string fullSnapshotSchedule: description: FullSnapshotSchedule defines the cron standard schedule for full snapshots. type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7f249764a..4894b4b92 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -6,6 +6,30 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - druid.gardener.cloud resources: diff --git a/controllers/config/compaction.go b/controllers/config/compaction.go new file mode 100644 index 000000000..b636472fb --- /dev/null +++ b/controllers/config/compaction.go @@ -0,0 +1,23 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +// CompactionConfig contains configuration for the compaction controller. 
+type CompactionConfig struct {
+	// EventsThreshold is the total number of events that can accumulate before a compaction job is triggered
+	EventsThreshold int64
+	// ActiveDeadlineDuration is the duration after which a running compaction job will be killed (e.g. "300ms", "20s", "-1.5h" or "2h45m")
+	ActiveDeadlineDuration string
+}
diff --git a/controllers/controller_ref_manager.go b/controllers/controller_ref_manager.go
index 60c8619d9..d03add6b9 100644
--- a/controllers/controller_ref_manager.go
+++ b/controllers/controller_ref_manager.go
@@ -31,11 +31,13 @@ import (
 	"k8s.io/client-go/tools/cache"
 
 	appsv1 "k8s.io/api/apps/v1"
-	batchv1 "k8s.io/api/batch/v1beta1"
+	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 
 	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
 	"github.com/gardener/etcd-druid/pkg/common"
+	"github.com/gardener/etcd-druid/pkg/utils"
+	"github.com/gardener/gardener/pkg/utils/imagevector"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -258,32 +260,32 @@ func (m *EtcdDruidRefManager) ClaimStatefulsets(ctx context.Context, statefulSet
 	return claimed, utilerrors.NewAggregate(errlist)
 }
 
-func (m *EtcdDruidRefManager) ClaimCronJob(ctx context.Context, cronJob *batchv1.CronJob, filters ...func(*batchv1.CronJob) bool) (*batchv1.CronJob, error) {
+func (m *EtcdDruidRefManager) ClaimJob(ctx context.Context, job *batchv1.Job, filters ...func(*batchv1.Job) bool) (*batchv1.Job, error) {
 	var (
-		claimed *batchv1.CronJob
+		claimed *batchv1.Job
 		errlist []error
 	)
 	match := func(obj metav1.Object) bool {
-		cj := obj.(*batchv1.CronJob)
+		j := obj.(*batchv1.Job)
 		// Check selector first so filters only run on potentially matching cronjobs.
-		if !m.Selector.Matches(labels.Set(cj.Labels)) {
+		if !m.Selector.Matches(labels.Set(j.Labels)) {
 			return false
 		}
 		for _, filter := range filters {
-			if !filter(cj) {
+			if !filter(j) {
 				return false
 			}
 		}
 		return true
 	}
-	ok, err := m.claimObject(ctx, cronJob, match, m.AdoptResource, m.ReleaseResource)
+	ok, err := m.claimObject(ctx, job, match, m.AdoptResource, m.ReleaseResource)
 	if err != nil {
 		errlist = append(errlist, err)
 	}
 	if ok {
-		claimed = cronJob.DeepCopy()
+		claimed = job.DeepCopy()
 	}
 	return claimed, utilerrors.NewAggregate(errlist)
 }
@@ -422,8 +424,8 @@ func (m *EtcdDruidRefManager) AdoptResource(ctx context.Context, obj client.Obje
 		if err := controllerutil.SetControllerReference(m.Controller, clone, m.scheme); err != nil {
 			return err
 		}
-	case *batchv1.CronJob:
-		clone = obj.(*batchv1.CronJob).DeepCopy()
+	case *batchv1.Job:
+		clone = obj.(*batchv1.Job).DeepCopy()
 		// Note that ValidateOwnerReferences() will reject this patch if another
 		// OwnerReference exists with controller=true.
if err := controllerutil.SetControllerReference(m.Controller, clone, m.scheme); err != nil { @@ -529,3 +531,218 @@ func CheckStatefulSet(etcd *druidv1alpha1.Etcd, statefulSet *appsv1.StatefulSet) return nil } + +func getMapFromEtcd(im imagevector.ImageVector, etcd *druidv1alpha1.Etcd) (map[string]interface{}, error) { + var ( + images map[string]*imagevector.Image + err error + ) + + imageNames := []string{ + common.Etcd, + common.BackupRestore, + } + + if etcd.Spec.Etcd.Image == nil || etcd.Spec.Backup.Image == nil { + + images, err = imagevector.FindImages(im, imageNames) + if err != nil { + return map[string]interface{}{}, err + } + } + + var statefulsetReplicas int + if etcd.Spec.Replicas != 0 { + statefulsetReplicas = 1 + } + + etcdValues := map[string]interface{}{ + "defragmentationSchedule": etcd.Spec.Etcd.DefragmentationSchedule, + "enableTLS": (etcd.Spec.Etcd.TLS != nil), + "pullPolicy": corev1.PullIfNotPresent, + // "username": etcd.Spec.Etcd.Username, + // "password": etcd.Spec.Etcd.Password, + } + + if etcd.Spec.Etcd.Resources != nil { + etcdValues["resources"] = etcd.Spec.Etcd.Resources + } + + if etcd.Spec.Etcd.Metrics != nil { + etcdValues["metrics"] = etcd.Spec.Etcd.Metrics + } + + if etcd.Spec.Etcd.ServerPort != nil { + etcdValues["serverPort"] = etcd.Spec.Etcd.ServerPort + } + + if etcd.Spec.Etcd.ClientPort != nil { + etcdValues["clientPort"] = etcd.Spec.Etcd.ClientPort + } + + if etcd.Spec.Etcd.EtcdDefragTimeout != nil { + etcdValues["etcdDefragTimeout"] = etcd.Spec.Etcd.EtcdDefragTimeout + } + + if etcd.Spec.Etcd.Image == nil { + val, ok := images[common.Etcd] + if !ok { + return map[string]interface{}{}, fmt.Errorf("either etcd resource or image vector should have %s image", common.Etcd) + } + etcdValues["image"] = val.String() + } else { + etcdValues["image"] = etcd.Spec.Etcd.Image + } + + var quota int64 = 8 * 1024 * 1024 * 1024 // 8Gi + if etcd.Spec.Etcd.Quota != nil { + quota = etcd.Spec.Etcd.Quota.Value() + } + + var deltaSnapshotMemoryLimit int64 = 100 * 1024 * 1024 // 100Mi + if etcd.Spec.Backup.DeltaSnapshotMemoryLimit != nil { + deltaSnapshotMemoryLimit = etcd.Spec.Backup.DeltaSnapshotMemoryLimit.Value() + } + + var enableProfiling = false + if etcd.Spec.Backup.EnableProfiling != nil { + enableProfiling = *etcd.Spec.Backup.EnableProfiling + + } + + backupValues := map[string]interface{}{ + "pullPolicy": corev1.PullIfNotPresent, + "etcdQuotaBytes": quota, + "etcdConnectionTimeout": "5m", + "snapstoreTempDir": "/var/etcd/data/temp", + "deltaSnapshotMemoryLimit": deltaSnapshotMemoryLimit, + "enableProfiling": enableProfiling, + } + + if etcd.Spec.Backup.Resources != nil { + backupValues["resources"] = etcd.Spec.Backup.Resources + } + + if etcd.Spec.Backup.FullSnapshotSchedule != nil { + backupValues["fullSnapshotSchedule"] = etcd.Spec.Backup.FullSnapshotSchedule + } + + if etcd.Spec.Backup.GarbageCollectionPolicy != nil { + backupValues["garbageCollectionPolicy"] = etcd.Spec.Backup.GarbageCollectionPolicy + } + + if etcd.Spec.Backup.GarbageCollectionPeriod != nil { + backupValues["garbageCollectionPeriod"] = etcd.Spec.Backup.GarbageCollectionPeriod + } + + if etcd.Spec.Backup.DeltaSnapshotPeriod != nil { + backupValues["deltaSnapshotPeriod"] = etcd.Spec.Backup.DeltaSnapshotPeriod + } + + if etcd.Spec.Backup.BackupCompactionSchedule != nil { + backupValues["backupCompactionSchedule"] = etcd.Spec.Backup.BackupCompactionSchedule + } + + if etcd.Spec.Backup.EtcdSnapshotTimeout != nil { + backupValues["etcdSnapshotTimeout"] = etcd.Spec.Backup.EtcdSnapshotTimeout + } + 
+ if etcd.Spec.Backup.Port != nil { + backupValues["port"] = etcd.Spec.Backup.Port + } + + if etcd.Spec.Backup.SnapshotCompression != nil { + compressionValues := make(map[string]interface{}) + if etcd.Spec.Backup.SnapshotCompression.Enabled { + compressionValues["enabled"] = etcd.Spec.Backup.SnapshotCompression.Enabled + } + if etcd.Spec.Backup.SnapshotCompression.Policy != nil { + compressionValues["policy"] = etcd.Spec.Backup.SnapshotCompression.Policy + } + backupValues["compression"] = compressionValues + } + + if etcd.Spec.Backup.Image == nil { + val, ok := images[common.BackupRestore] + if !ok { + return map[string]interface{}{}, fmt.Errorf("either etcd resource or image vector should have %s image", common.BackupRestore) + } + backupValues["image"] = val.String() + } else { + backupValues["image"] = etcd.Spec.Backup.Image + } + + volumeClaimTemplateName := etcd.Name + if etcd.Spec.VolumeClaimTemplate != nil && len(*etcd.Spec.VolumeClaimTemplate) != 0 { + volumeClaimTemplateName = *etcd.Spec.VolumeClaimTemplate + } + + sharedConfigValues := map[string]interface{}{ + "autoCompactionMode": druidv1alpha1.Periodic, + "autoCompactionRetention": DefaultAutoCompactionRetention, + } + + if etcd.Spec.Common.AutoCompactionMode != nil { + sharedConfigValues["autoCompactionMode"] = etcd.Spec.Common.AutoCompactionMode + } + + if etcd.Spec.Common.AutoCompactionRetention != nil { + sharedConfigValues["autoCompactionRetention"] = etcd.Spec.Common.AutoCompactionRetention + } + + values := map[string]interface{}{ + "name": etcd.Name, + "uid": etcd.UID, + "selector": etcd.Spec.Selector, + "labels": etcd.Spec.Labels, + "annotations": etcd.Spec.Annotations, + "etcd": etcdValues, + "backup": backupValues, + "sharedConfig": sharedConfigValues, + "replicas": etcd.Spec.Replicas, + "statefulsetReplicas": statefulsetReplicas, + "serviceName": fmt.Sprintf("%s-client", etcd.Name), + "configMapName": fmt.Sprintf("etcd-bootstrap-%s", string(etcd.UID[:6])), + "jobName": getJobName(etcd), + "volumeClaimTemplateName": volumeClaimTemplateName, + } + + if etcd.Spec.StorageCapacity != nil { + values["storageCapacity"] = etcd.Spec.StorageCapacity + } + + if etcd.Spec.StorageClass != nil { + values["storageClass"] = etcd.Spec.StorageClass + } + + if etcd.Spec.PriorityClassName != nil { + values["priorityClassName"] = *etcd.Spec.PriorityClassName + } + + if etcd.Spec.Etcd.TLS != nil { + values["tlsServerSecret"] = etcd.Spec.Etcd.TLS.ServerTLSSecretRef.Name + values["tlsClientSecret"] = etcd.Spec.Etcd.TLS.ClientTLSSecretRef.Name + values["tlsCASecret"] = etcd.Spec.Etcd.TLS.TLSCASecretRef.Name + } + + if etcd.Spec.Backup.Store != nil { + storageProvider, err := utils.StorageProviderFromInfraProvider(etcd.Spec.Backup.Store.Provider) + if err != nil { + return nil, err + } + storeValues := map[string]interface{}{ + "storePrefix": etcd.Spec.Backup.Store.Prefix, + "storageProvider": storageProvider, + } + if etcd.Spec.Backup.Store.Container != nil { + storeValues["storageContainer"] = etcd.Spec.Backup.Store.Container + } + if etcd.Spec.Backup.Store.SecretRef != nil { + storeValues["storeSecret"] = etcd.Spec.Backup.Store.SecretRef.Name + } + + values["store"] = storeValues + } + + return values, nil +} diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go index 7537e2a2e..ce4199f2e 100644 --- a/controllers/controllers_suite_test.go +++ b/controllers/controllers_suite_test.go @@ -27,18 +27,14 @@ import ( "github.com/gardener/gardener/pkg/utils/test" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/envtest/printer" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" // +kubebuilder:scaffold:imports ) @@ -103,8 +99,7 @@ var _ = BeforeSuite(func(done Done) { }) Expect(err).NotTo(HaveOccurred()) - Expect(err).NotTo(HaveOccurred()) - er, err := NewEtcdReconcilerWithImageVector(mgr, false) + er, err := NewEtcdReconcilerWithImageVector(mgr) Expect(err).NotTo(HaveOccurred()) err = er.SetupWithManager(mgr, 1, true) @@ -116,9 +111,20 @@ var _ = BeforeSuite(func(done Done) { }, }) + Expect(err).NotTo(HaveOccurred()) + err = custodian.SetupWithManager(mgrCtx, mgr, 1) Expect(err).NotTo(HaveOccurred()) + lc := NewLeaseController(mgr, controllersconfig.CompactionConfig{ + EventsThreshold: 1000000, + ActiveDeadlineDuration: "2m", + }) + Expect(err).NotTo(HaveOccurred()) + + err = lc.SetupWithManager(mgrCtx, mgr, 1) + Expect(err).NotTo(HaveOccurred()) + mgrStopped = startTestManager(mgrCtx, mgr) close(done) @@ -145,14 +151,3 @@ func startTestManager(ctx context.Context, mgr manager.Manager) *sync.WaitGroup mgr.GetCache().WaitForCacheSync(syncCtx) return wg } - -func SetupWithManager(mgr ctrl.Manager, r reconcile.Reconciler) error { - return ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{ - MaxConcurrentReconciles: 10, - }). - For(&druidv1alpha1.Etcd{}). - Owns(&corev1.ConfigMap{}). - Owns(&corev1.Service{}). - Owns(&appsv1.StatefulSet{}). 
- Complete(r) -} diff --git a/controllers/etcd_controller.go b/controllers/etcd_controller.go index 66606f776..49f553b15 100644 --- a/controllers/etcd_controller.go +++ b/controllers/etcd_controller.go @@ -29,6 +29,7 @@ import ( "github.com/gardener/etcd-druid/pkg/common" druidpredicates "github.com/gardener/etcd-druid/pkg/predicate" "github.com/gardener/etcd-druid/pkg/utils" + coordinationv1 "k8s.io/api/coordination/v1" extensionspredicate "github.com/gardener/gardener/extensions/pkg/predicate" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" @@ -39,11 +40,11 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -96,17 +97,16 @@ var ( // EtcdReconciler reconciles a Etcd object type EtcdReconciler struct { client.Client - Scheme *runtime.Scheme - chartApplier kubernetes.ChartApplier - Config *rest.Config - ImageVector imagevector.ImageVector - logger logr.Logger - enableBackupCompactionJobTempFS bool + Scheme *runtime.Scheme + chartApplier kubernetes.ChartApplier + Config *rest.Config + ImageVector imagevector.ImageVector + logger logr.Logger } // NewReconcilerWithImageVector creates a new EtcdReconciler object with an image vector func NewReconcilerWithImageVector(mgr manager.Manager) (*EtcdReconciler, error) { - etcdReconciler, err := NewEtcdReconciler(mgr, false) + etcdReconciler, err := NewEtcdReconciler(mgr) if err != nil { return nil, err } @@ -114,19 +114,18 @@ func NewReconcilerWithImageVector(mgr manager.Manager) (*EtcdReconciler, error) } // NewEtcdReconciler creates a new EtcdReconciler object -func NewEtcdReconciler(mgr manager.Manager, enableBackupCompactionJobTempFS bool) (*EtcdReconciler, error) { +func NewEtcdReconciler(mgr manager.Manager) (*EtcdReconciler, error) { return (&EtcdReconciler{ - Client: mgr.GetClient(), - Config: mgr.GetConfig(), - Scheme: mgr.GetScheme(), - logger: log.Log.WithName("etcd-controller"), - enableBackupCompactionJobTempFS: enableBackupCompactionJobTempFS, + Client: mgr.GetClient(), + Config: mgr.GetConfig(), + Scheme: mgr.GetScheme(), + logger: log.Log.WithName("etcd-controller"), }).InitializeControllerWithChartApplier() } // NewEtcdReconcilerWithImageVector creates a new EtcdReconciler object -func NewEtcdReconcilerWithImageVector(mgr manager.Manager, enableBackupCompactionJobTempFS bool) (*EtcdReconciler, error) { - ec, err := NewEtcdReconciler(mgr, enableBackupCompactionJobTempFS) +func NewEtcdReconcilerWithImageVector(mgr manager.Manager) (*EtcdReconciler, error) { + ec, err := NewEtcdReconciler(mgr) if err != nil { return nil, err } @@ -149,10 +148,6 @@ func getChartPathForService() string { return filepath.Join("etcd", "templates", "etcd-service.yaml") } -func getChartPathForCronJob() string { - return filepath.Join("etcd", "templates", "etcd-compaction-cronjob.yaml") -} - func getImageYAMLPath() string { return filepath.Join(common.ChartPath, DefaultImageVector) } @@ -189,6 +184,7 @@ func (r *EtcdReconciler) InitializeControllerWithImageVector() (*EtcdReconciler, // +kubebuilder:rbac:groups=druid.gardener.cloud,resources=etcds,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=druid.gardener.cloud,resources=etcds/status,verbs=get;update;patch +// 
+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete // Reconcile reconciles the etcd. func (r *EtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -254,6 +250,23 @@ func (r *EtcdReconciler) reconcile(ctx context.Context, etcd *druidv1alpha1.Etcd }, err } + // TODO: (abdasgupta) Use the full snapshot lease available from the reconcile operation for setting backupready condition + fl, err := r.reconcileFullLease(ctx, logger, etcd) + if err != nil { + return ctrl.Result{ + Requeue: true, + }, err + } + logger.Info("Available Full Snapshot Lease: " + fl.Name) + + dl, err := r.reconcileDeltaLease(ctx, logger, etcd) + if err != nil { + return ctrl.Result{ + Requeue: true, + }, err + } + logger.Info("Available Delta Snapshot Lease: " + dl.Name) + op, svc, sts, err := r.reconcileEtcd(ctx, logger, etcd) if err != nil { if err := r.updateEtcdErrorStatus(ctx, op, etcd, sts, err); err != nil { @@ -791,146 +804,167 @@ func (r *EtcdReconciler) getStatefulSetFromEtcd(etcd *druidv1alpha1.Etcd, values return decoded, nil } -func (r *EtcdReconciler) reconcileCronJob(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd, values map[string]interface{}) (*batchv1.CronJob, error) { - logger.Info("Reconcile etcd compaction cronjob") - - var cronJob batchv1.CronJob - err := r.Get(ctx, types.NamespacedName{Name: getCronJobName(etcd), Namespace: etcd.Namespace}, &cronJob) - - //If backupCompactionSchedule is present in the etcd spec, continue with reconciliation of cronjob - //If backupCompactionSchedule is not present in the etcd spec, do not proceed with cronjob - //reconciliation. Furthermore, delete any already existing cronjobs corresponding with this etcd - backupCompactionScheduleFound := false - if values["backup"].(map[string]interface{})["backupCompactionSchedule"] != nil { - backupCompactionScheduleFound = true - } - - if !backupCompactionScheduleFound { - if err == nil { - err = r.Delete(ctx, &cronJob, client.PropagationPolicy(metav1.DeletePropagationForeground)) - if err != nil { - return nil, err +func (r *EtcdReconciler) reconcileFullLease(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd) (*coordinationv1.Lease, error) { + // Get or Create full_snapshot_revisions lease object that will help to set BackupReady condition + nsName := types.NamespacedName{ + Name: getFullLease(etcd), + Namespace: etcd.Namespace, + } + + fullLease := &coordinationv1.Lease{} + err1 := r.Get(ctx, nsName, fullLease) + if err1 != nil { + logger.Info("Couldn't fetch full snap lease full-snapshot-revisions because: " + err1.Error()) + + if errors.IsNotFound(err1) { + logger.Info("Creating the full snap lease full-snapshot-revisions") + + renewTime := metav1.NewMicroTime(time.Now()) + fullLease = &coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: getFullLease(etcd), + Namespace: etcd.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "druid.gardener.cloud/v1alpha1", + BlockOwnerDeletion: pointer.BoolPtr(true), + Controller: pointer.BoolPtr(true), + Kind: "Etcd", + Name: etcd.Name, + UID: etcd.UID, + }, + }, + }, + Spec: coordinationv1.LeaseSpec{ + HolderIdentity: pointer.StringPtr("0"), + RenewTime: &renewTime, + }, } - } - return nil, nil - } - - if err == nil { - logger.Info("Claiming cronjob object") - // If any adoptions are attempted, we should first recheck for deletion with - // an uncached quorum read sometime after listing Machines (see #42639). 
-		//TODO: Consider putting this claim logic just after creating a new cronjob
-		canAdoptFunc := RecheckDeletionTimestamp(func() (metav1.Object, error) {
-			foundEtcd := &druidv1alpha1.Etcd{}
-			err := r.Get(context.TODO(), types.NamespacedName{Name: etcd.Name, Namespace: etcd.Namespace}, foundEtcd)
-			if err != nil {
-				return nil, err
+			err2 := r.Create(ctx, fullLease)
+			if err2 != nil {
+				logger.Error(err2, "Full snap lease full-snapshot-revisions couldn't be created")
+				return nil, err2
 			}
-			if foundEtcd.GetDeletionTimestamp() != nil {
-				return nil, fmt.Errorf("%v/%v etcd is marked for deletion", etcd.Namespace, etcd.Name)
+			fullLease = &coordinationv1.Lease{}
+			err2 = r.Get(ctx, nsName, fullLease)
+			if err2 != nil {
+				logger.Error(err2, "Full snap lease full-snapshot-revisions couldn't be fetched just after creation")
+				return nil, err2
 			}
-
-			if foundEtcd.UID != etcd.UID {
-				return nil, fmt.Errorf("original %v/%v etcd gone: got uid %v, wanted %v", etcd.Namespace, etcd.Name, foundEtcd.UID, etcd.UID)
+		} else {
+			return nil, err1
+		}
+	}
+	if !checkEtcdOwnerReference(fullLease.GetOwnerReferences(), etcd) {
+		err := kutil.TryPatch(ctx, retry.DefaultBackoff, r.Client, fullLease, func() error {
+			fullLease.OwnerReferences = []metav1.OwnerReference{
+				{
+					APIVersion:         "druid.gardener.cloud/v1alpha1",
+					BlockOwnerDeletion: pointer.BoolPtr(true),
+					Controller:         pointer.BoolPtr(true),
+					Kind:               "Etcd",
+					Name:               etcd.Name,
+					UID:                etcd.UID,
+				},
 			}
-			return foundEtcd, nil
+			return nil
 		})
-
-		selector, err := metav1.LabelSelectorAsSelector(etcd.Spec.Selector)
-		if err != nil {
-			logger.Error(err, "Error converting etcd selector to selector")
-			return nil, err
-		}
-		dm := NewEtcdDruidRefManager(r.Client, r.Scheme, etcd, selector, etcdGVK, canAdoptFunc)
-
-		logger.Info("Claiming existing cronjob")
-		claimedCronJob, err := dm.ClaimCronJob(ctx, &cronJob)
 		if err != nil {
+			logger.Error(err, "Full snap lease found but error occurred during claiming it")
 			return nil, err
 		}
-
-		if _, err = r.syncCronJobSpec(ctx, claimedCronJob, etcd, values, logger); err != nil {
-			return nil, err
-		}
-
-		return claimedCronJob, err
-	}
-
-	// Required cronjob doesn't exist. Create new
-	cj, err := r.getCronJobFromEtcd(etcd, values, logger)
-	if err != nil {
-		return nil, err
-	}
-
-	logger.Info("Creating cronjob", "cronjob", kutil.Key(cj.Namespace, cj.Name).String())
-	err = r.Create(ctx, cj)
-
-	// Ignore the precondition violated error, this machine is already updated
-	// with the desired label.
- if err == errorsutil.ErrPreconditionViolated { - logger.Info("Cronjob precondition doesn't hold, skip updating it", "cronjob", kutil.Key(cj.Namespace, cj.Name).String()) - err = nil } - if err != nil { - return nil, err - } - - //TODO: Evaluate necessity of claiming object here after creation - - return cj, err + return fullLease, nil } -func (r *EtcdReconciler) syncCronJobSpec(ctx context.Context, cj *batchv1.CronJob, etcd *druidv1alpha1.Etcd, values map[string]interface{}, logger logr.Logger) (*batchv1.CronJob, error) { - decoded, err := r.getCronJobFromEtcd(etcd, values, logger) - if err != nil { - return nil, err - } - - if reflect.DeepEqual(cj.Spec, decoded.Spec) { - return cj, nil - } - - cjCopy := cj.DeepCopy() - cjCopy.Spec = decoded.Spec +func getFullLease(etcd *druidv1alpha1.Etcd) string { + return fmt.Sprintf("%s-full-snap", string(etcd.UID[:6])) +} - err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - return r.Patch(ctx, cjCopy, client.MergeFrom(cj)) - }) +func (r *EtcdReconciler) reconcileDeltaLease(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd) (*coordinationv1.Lease, error) { + // Get or Create delta_snapshot_revisions lease object that will keep track of delta snapshot revisions based on which + // compaction job will be scheduled + nsName := types.NamespacedName{ + Name: getDeltaLease(etcd), + Namespace: etcd.Namespace, + } + + deltaLease := &coordinationv1.Lease{} + err1 := r.Get(ctx, nsName, deltaLease) + if err1 != nil { + logger.Info("Couldn't fetch delta snap lease delta-snapshot-revisions because: " + err1.Error()) + + if errors.IsNotFound(err1) { + logger.Info("Creating the delta snap lease delta-snapshot-revisions") + + renewTime := metav1.NewMicroTime(time.Now()) + deltaLease = &coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: getDeltaLease(etcd), + Namespace: etcd.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "druid.gardener.cloud/v1alpha1", + BlockOwnerDeletion: pointer.BoolPtr(true), + Controller: pointer.BoolPtr(true), + Kind: "Etcd", + Name: etcd.Name, + UID: etcd.UID, + }, + }, + }, + Spec: coordinationv1.LeaseSpec{ + HolderIdentity: pointer.StringPtr("0"), + RenewTime: &renewTime, + }, + } + err2 := r.Create(ctx, deltaLease) + if err2 != nil { + logger.Error(err2, "Delta snap lease delta-snapshot-revisions couldn't be created") + return nil, err2 + } - if err == errorsutil.ErrPreconditionViolated { - logger.Info("cronjob precondition doesn't hold, skip updating it", "cronjob", kutil.Key(cjCopy.Namespace, cjCopy.Name).String()) - err = nil - } - if err != nil { - return nil, err + deltaLease = &coordinationv1.Lease{} + err2 = r.Get(ctx, nsName, deltaLease) + if err2 != nil { + logger.Error(err2, "Delta snap lease delta-snapshot-revisions couldn't be fetched just after creation") + return nil, err2 + } + } else { + return nil, err1 + } } - return cjCopy, err -} - -func (r *EtcdReconciler) getCronJobFromEtcd(etcd *druidv1alpha1.Etcd, values map[string]interface{}, logger logr.Logger) (*batchv1.CronJob, error) { - var err error - decoded := &batchv1.CronJob{} - cronJobPath := getChartPathForCronJob() - chartPath := getChartPath() - renderedChart, err := r.chartApplier.Render(chartPath, etcd.Name, etcd.Namespace, values) - if err != nil { - return nil, err - } - if content, ok := renderedChart.Files()[cronJobPath]; ok { - decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader([]byte(content)), 1024) - if err = decoder.Decode(&decoded); err != nil { + if 
!checkEtcdOwnerReference(deltaLease.GetOwnerReferences(), etcd) { + err := kutil.TryPatch(ctx, retry.DefaultBackoff, r.Client, deltaLease, func() error { + deltaLease.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: "druid.gardener.cloud/v1alpha1", + BlockOwnerDeletion: pointer.BoolPtr(true), + Controller: pointer.BoolPtr(true), + Kind: "Etcd", + Name: etcd.Name, + UID: etcd.UID, + }, + } + return nil + }) + if err != nil { + logger.Error(err, "Delta snap lease found but error occured during claiming it") return nil, err } - return decoded, nil } - return nil, fmt.Errorf("missing cronjob template file in the charts: %v", cronJobPath) + return deltaLease, nil +} + +func getDeltaLease(etcd *druidv1alpha1.Etcd) string { + return fmt.Sprintf("%s-delta-snap", string(etcd.UID[:6])) } func (r *EtcdReconciler) reconcileEtcd(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd) (operationResult, *corev1.Service, *appsv1.StatefulSet, error) { - values, err := r.getMapFromEtcd(etcd) + values, err := getMapFromEtcd(r.ImageVector, etcd) if err != nil { return noOp, nil, nil, err } @@ -957,14 +991,6 @@ func (r *EtcdReconciler) reconcileEtcd(ctx context.Context, logger logr.Logger, values["configMapName"] = cm.Name } - cj, err := r.reconcileCronJob(ctx, logger, etcd, values) - if err != nil { - return noOp, nil, nil, err - } - if cj != nil { - values["cronJobName"] = cj.Name - } - op, sts, err := r.reconcileStatefulSet(ctx, logger, etcd, values) if err != nil { return noOp, nil, nil, err @@ -1001,223 +1027,6 @@ func checkEtcdAnnotations(annotations map[string]string, etcd metav1.Object) boo } -func (r *EtcdReconciler) getMapFromEtcd(etcd *druidv1alpha1.Etcd) (map[string]interface{}, error) { - var ( - images map[string]*imagevector.Image - err error - ) - - imageNames := []string{ - common.Etcd, - common.BackupRestore, - } - - if etcd.Spec.Etcd.Image == nil || etcd.Spec.Backup.Image == nil { - - images, err = imagevector.FindImages(r.ImageVector, imageNames) - if err != nil { - return map[string]interface{}{}, err - } - } - - var statefulsetReplicas int - if etcd.Spec.Replicas != 0 { - statefulsetReplicas = 1 - } - - etcdValues := map[string]interface{}{ - "defragmentationSchedule": etcd.Spec.Etcd.DefragmentationSchedule, - "enableTLS": (etcd.Spec.Etcd.TLS != nil), - "pullPolicy": corev1.PullIfNotPresent, - // "username": etcd.Spec.Etcd.Username, - // "password": etcd.Spec.Etcd.Password, - } - - if etcd.Spec.Etcd.Resources != nil { - etcdValues["resources"] = etcd.Spec.Etcd.Resources - } - - if etcd.Spec.Etcd.Metrics != nil { - etcdValues["metrics"] = etcd.Spec.Etcd.Metrics - } - - if etcd.Spec.Etcd.ServerPort != nil { - etcdValues["serverPort"] = etcd.Spec.Etcd.ServerPort - } - - if etcd.Spec.Etcd.ClientPort != nil { - etcdValues["clientPort"] = etcd.Spec.Etcd.ClientPort - } - - if etcd.Spec.Etcd.EtcdDefragTimeout != nil { - etcdValues["etcdDefragTimeout"] = etcd.Spec.Etcd.EtcdDefragTimeout - } - - if etcd.Spec.Etcd.Image == nil { - val, ok := images[common.Etcd] - if !ok { - return map[string]interface{}{}, fmt.Errorf("either etcd resource or image vector should have %s image", common.Etcd) - } - etcdValues["image"] = val.String() - } else { - etcdValues["image"] = etcd.Spec.Etcd.Image - } - - var quota int64 = 8 * 1024 * 1024 * 1024 // 8Gi - if etcd.Spec.Etcd.Quota != nil { - quota = etcd.Spec.Etcd.Quota.Value() - } - - var deltaSnapshotMemoryLimit int64 = 100 * 1024 * 1024 // 100Mi - if etcd.Spec.Backup.DeltaSnapshotMemoryLimit != nil { - deltaSnapshotMemoryLimit = 
etcd.Spec.Backup.DeltaSnapshotMemoryLimit.Value() - } - - var enableProfiling = false - if etcd.Spec.Backup.EnableProfiling != nil { - enableProfiling = *etcd.Spec.Backup.EnableProfiling - - } - - backupValues := map[string]interface{}{ - "pullPolicy": corev1.PullIfNotPresent, - "etcdQuotaBytes": quota, - "etcdConnectionTimeout": "5m", - "snapstoreTempDir": "/var/etcd/data/temp", - "deltaSnapshotMemoryLimit": deltaSnapshotMemoryLimit, - "enableProfiling": enableProfiling, - } - - if etcd.Spec.Backup.Resources != nil { - backupValues["resources"] = etcd.Spec.Backup.Resources - } - - if etcd.Spec.Backup.FullSnapshotSchedule != nil { - backupValues["fullSnapshotSchedule"] = etcd.Spec.Backup.FullSnapshotSchedule - } - - if etcd.Spec.Backup.GarbageCollectionPolicy != nil { - backupValues["garbageCollectionPolicy"] = etcd.Spec.Backup.GarbageCollectionPolicy - } - - if etcd.Spec.Backup.GarbageCollectionPeriod != nil { - backupValues["garbageCollectionPeriod"] = etcd.Spec.Backup.GarbageCollectionPeriod - } - - if etcd.Spec.Backup.DeltaSnapshotPeriod != nil { - backupValues["deltaSnapshotPeriod"] = etcd.Spec.Backup.DeltaSnapshotPeriod - } - - if etcd.Spec.Backup.BackupCompactionSchedule != nil { - backupValues["backupCompactionSchedule"] = etcd.Spec.Backup.BackupCompactionSchedule - } - - backupValues["enableBackupCompactionJobTempFS"] = r.enableBackupCompactionJobTempFS - - if etcd.Spec.Backup.EtcdSnapshotTimeout != nil { - backupValues["etcdSnapshotTimeout"] = etcd.Spec.Backup.EtcdSnapshotTimeout - } - - if etcd.Spec.Backup.Port != nil { - backupValues["port"] = etcd.Spec.Backup.Port - } - - if etcd.Spec.Backup.SnapshotCompression != nil { - compressionValues := make(map[string]interface{}) - if etcd.Spec.Backup.SnapshotCompression.Enabled { - compressionValues["enabled"] = etcd.Spec.Backup.SnapshotCompression.Enabled - } - if etcd.Spec.Backup.SnapshotCompression.Policy != nil { - compressionValues["policy"] = etcd.Spec.Backup.SnapshotCompression.Policy - } - backupValues["compression"] = compressionValues - } - - if etcd.Spec.Backup.Image == nil { - val, ok := images[common.BackupRestore] - if !ok { - return map[string]interface{}{}, fmt.Errorf("either etcd resource or image vector should have %s image", common.BackupRestore) - } - backupValues["image"] = val.String() - } else { - backupValues["image"] = etcd.Spec.Backup.Image - } - - volumeClaimTemplateName := etcd.Name - if etcd.Spec.VolumeClaimTemplate != nil && len(*etcd.Spec.VolumeClaimTemplate) != 0 { - volumeClaimTemplateName = *etcd.Spec.VolumeClaimTemplate - } - - sharedConfigValues := map[string]interface{}{ - "autoCompactionMode": druidv1alpha1.Periodic, - "autoCompactionRetention": DefaultAutoCompactionRetention, - } - - if etcd.Spec.Common.AutoCompactionMode != nil { - sharedConfigValues["autoCompactionMode"] = etcd.Spec.Common.AutoCompactionMode - } - - if etcd.Spec.Common.AutoCompactionRetention != nil { - sharedConfigValues["autoCompactionRetention"] = etcd.Spec.Common.AutoCompactionRetention - } - - values := map[string]interface{}{ - "name": etcd.Name, - "uid": etcd.UID, - "selector": etcd.Spec.Selector, - "labels": etcd.Spec.Labels, - "annotations": etcd.Spec.Annotations, - "etcd": etcdValues, - "backup": backupValues, - "sharedConfig": sharedConfigValues, - "replicas": etcd.Spec.Replicas, - "statefulsetReplicas": statefulsetReplicas, - "serviceName": fmt.Sprintf("%s-client", etcd.Name), - "configMapName": fmt.Sprintf("etcd-bootstrap-%s", string(etcd.UID[:6])), - "cronJobName": getCronJobName(etcd), - 
"volumeClaimTemplateName": volumeClaimTemplateName, - } - - if etcd.Spec.StorageCapacity != nil { - values["storageCapacity"] = etcd.Spec.StorageCapacity - } - - if etcd.Spec.StorageClass != nil { - values["storageClass"] = etcd.Spec.StorageClass - } - - if etcd.Spec.PriorityClassName != nil { - values["priorityClassName"] = *etcd.Spec.PriorityClassName - } - - if etcd.Spec.Etcd.TLS != nil { - values["tlsServerSecret"] = etcd.Spec.Etcd.TLS.ServerTLSSecretRef.Name - values["tlsClientSecret"] = etcd.Spec.Etcd.TLS.ClientTLSSecretRef.Name - values["tlsCASecret"] = etcd.Spec.Etcd.TLS.TLSCASecretRef.Name - } - - if etcd.Spec.Backup.Store != nil { - storageProvider, err := utils.StorageProviderFromInfraProvider(etcd.Spec.Backup.Store.Provider) - if err != nil { - return nil, err - } - storeValues := map[string]interface{}{ - "storePrefix": etcd.Spec.Backup.Store.Prefix, - "storageProvider": storageProvider, - } - if etcd.Spec.Backup.Store.Container != nil { - storeValues["storageContainer"] = etcd.Spec.Backup.Store.Container - } - if etcd.Spec.Backup.Store.SecretRef != nil { - storeValues["storeSecret"] = etcd.Spec.Backup.Store.SecretRef.Name - } - - values["store"] = storeValues - } - - return values, nil -} - func (r *EtcdReconciler) addFinalizersToDependantSecrets(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd) error { secrets := []*corev1.SecretReference{} if etcd.Spec.Etcd.TLS != nil { @@ -1493,10 +1302,6 @@ func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager, workers int, ignoreO return builder.Complete(r) } -func getCronJobName(etcd *druidv1alpha1.Etcd) string { - return fmt.Sprintf("%s-compact-backup", etcd.Name) -} - func buildPredicate(ignoreOperationAnnotation bool) predicate.Predicate { if ignoreOperationAnnotation { return predicate.GenerationChangedPredicate{} diff --git a/controllers/etcd_controller_test.go b/controllers/etcd_controller_test.go index c08dd57b7..1cd0e6a08 100644 --- a/controllers/etcd_controller_test.go +++ b/controllers/etcd_controller_test.go @@ -30,20 +30,22 @@ import ( "github.com/gardener/etcd-druid/pkg/common" "github.com/gardener/etcd-druid/pkg/utils" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + kutil "github.com/gardener/gardener/pkg/utils/kubernetes" "github.com/gardener/gardener/pkg/utils/kubernetes/health" . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" . 
"github.com/onsi/gomega/gstruct" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/batch/v1" - batchv1 "k8s.io/api/batch/v1beta1" + batchv1 "k8s.io/api/batch/v1" + coordinationv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -100,8 +102,7 @@ var ( common.Etcd, common.BackupRestore, } - backupCompactionSchedule = "15 */24 * * *" - etcdSnapshotTimeout = metav1.Duration{ + etcdSnapshotTimeout = metav1.Duration{ Duration: 10 * time.Minute, } etcdDefragTimeout = metav1.Duration{ @@ -444,6 +445,67 @@ var _ = Describe("Druid", func() { Entry("when etcd has the spec changed, druid should reconcile statefulset", "foo3", WithoutOwner), ) + Describe("when fields are not set in etcd.Spec", func() { + Context("when fields are not set in etcd.Spec", func() { + var err error + var instance *druidv1alpha1.Etcd + var c client.Client + var s *appsv1.StatefulSet + var cm *corev1.ConfigMap + var svc *corev1.Service + var j *batchv1.Job + BeforeEach(func() { + instance = getEtcdWithDefault("foo333", "default") + c = mgr.GetClient() + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Namespace, + }, + } + + _, err = controllerutil.CreateOrUpdate(context.TODO(), c, &ns, func() error { return nil }) + Expect(err).To(Not(HaveOccurred())) + + err = c.Create(context.TODO(), instance) + Expect(err).NotTo(HaveOccurred()) + s = &appsv1.StatefulSet{} + Eventually(func() error { return statefulsetIsCorrectlyReconciled(c, instance, s) }, timeout, pollingInterval).Should(BeNil()) + cm = &corev1.ConfigMap{} + Eventually(func() error { return configMapIsCorrectlyReconciled(c, instance, cm) }, timeout, pollingInterval).Should(BeNil()) + svc = &corev1.Service{} + Eventually(func() error { return serviceIsCorrectlyReconciled(c, instance, svc) }, timeout, pollingInterval).Should(BeNil()) + }) + It("no jobs will be scheduled because no store details are provided", func() { + // Verufy if the statefulset updated the specs + validateEtcdAppWithDefaults(instance, s, cm, svc) + + setStatefulSetReady(s) + err = c.Status().Update(context.TODO(), s) + Expect(err).NotTo(HaveOccurred()) + + // Verify if that the job is not created even if the holder identity in delta-snapshot-revision is greater than 1M + deltaLease := &coordinationv1.Lease{} + Eventually(func() error { return deltaLeaseIsCorrectlyReconciled(c, instance, deltaLease) }, timeout, pollingInterval).Should(BeNil()) + err = kutil.TryUpdate(context.TODO(), retry.DefaultBackoff, c, deltaLease, func() error { + deltaLease.Spec.HolderIdentity = pointer.StringPtr("1000000") + renewedTime := time.Now() + deltaLease.Spec.RenewTime = &metav1.MicroTime{Time: renewedTime} + return nil + }) + Expect(err).NotTo(HaveOccurred()) + + j = &batchv1.Job{} + Eventually(func() error { return jobIsCorrectlyReconciled(c, instance, j) }, time.Duration(30*time.Second), pollingInterval).ShouldNot(BeNil()) + + }) + AfterEach(func() { + Expect(c.Delete(context.TODO(), instance)).To(Succeed()) + Eventually(func() error { return statefulSetRemoved(c, s) }, timeout, pollingInterval).Should(BeNil()) + Eventually(func() error { return etcdRemoved(c, instance) }, timeout, pollingInterval).Should(BeNil()) + }) + }) + }) + Describe("when 
adding etcd resources with statefulset already present", func() { Context("when statefulset is in crashloopbackoff", func() { var err error @@ -566,14 +628,20 @@ var _ = Describe("Druid", func() { Entry("when statefulset without ownerReference and with owner annotations, druid should adopt and delete statefulset", "foo63", WithOwnerAnnotation), ) + // When an ETCD resource is created, check if the Statefulset, Configmap, Service is also created with validateETCDApp and + // check if associated compaction job is created with validateETCDCmpctJob DescribeTable("when etcd resource is created", - func(name string, generateEtcd func(string, string) *druidv1alpha1.Etcd, validate func(*appsv1.StatefulSet, *corev1.ConfigMap, *corev1.Service, *druidv1alpha1.Etcd)) { + func(name string, + generateEtcd func(string, string) *druidv1alpha1.Etcd, + validateETCDApp func(*druidv1alpha1.Etcd, *appsv1.StatefulSet, *corev1.ConfigMap, *corev1.Service), + validateETCDCmpctJob func(*druidv1alpha1.Etcd, *batchv1.Job)) { var err error var instance *druidv1alpha1.Etcd var c client.Client var s *appsv1.StatefulSet var cm *corev1.ConfigMap var svc *corev1.Service + var j *batchv1.Job instance = generateEtcd(name, "default") c = mgr.GetClient() @@ -600,32 +668,50 @@ var _ = Describe("Druid", func() { svc = &corev1.Service{} Eventually(func() error { return serviceIsCorrectlyReconciled(c, instance, svc) }, timeout, pollingInterval).Should(BeNil()) - validate(s, cm, svc, instance) + // Verify if the job is created when holder identity in delta-snapshot-revision is greater than 1M + deltaLease := &coordinationv1.Lease{} + Eventually(func() error { return deltaLeaseIsCorrectlyReconciled(c, instance, deltaLease) }, timeout, pollingInterval).Should(BeNil()) + err = kutil.TryUpdate(context.TODO(), retry.DefaultBackoff, c, deltaLease, func() error { + deltaLease.Spec.HolderIdentity = pointer.StringPtr("1000000") + renewedTime := time.Now() + deltaLease.Spec.RenewTime = &metav1.MicroTime{Time: renewedTime} + return nil + }) + Expect(err).NotTo(HaveOccurred()) + + j = &batchv1.Job{} + Eventually(func() error { return jobIsCorrectlyReconciled(c, instance, j) }, timeout, pollingInterval).Should(BeNil()) + + validateETCDApp(instance, s, cm, svc) + validateETCDCmpctJob(instance, j) setStatefulSetReady(s) + err = c.Status().Update(context.TODO(), s) Expect(err).NotTo(HaveOccurred()) }, - Entry("if fields are not set in etcd.Spec, the statefulset should reflect the spec changes", "foo51", getEtcdWithDefault, validateEtcdWithDefaults), - Entry("if fields are set in etcd.Spec and TLS enabled, the resources should reflect the spec changes", "foo52", getEtcdWithTLS, validateEtcd), - Entry("if the store is GCS, the statefulset should reflect the spec changes", "foo53", getEtcdWithGCS, validateStoreGCP), - Entry("if the store is S3, the statefulset should reflect the spec changes", "foo54", getEtcdWithS3, validateStoreAWS), - Entry("if the store is ABS, the statefulset should reflect the spec changes", "foo55", getEtcdWithABS, validateStoreAzure), - Entry("if the store is Swift, the statefulset should reflect the spec changes", "foo56", getEtcdWithSwift, validateStoreOpenstack), - Entry("if the store is OSS, the statefulset should reflect the spec changes", "foo57", getEtcdWithOSS, validateStoreAlicloud), + Entry("if fields are set in etcd.Spec and TLS enabled, the resources should reflect the spec changes", "foo52", getEtcdWithTLS, validateEtcdApp, validateEtcdForCmpctJob), + Entry("if the store is GCS, the statefulset and compaction job 
should reflect the spec changes", "foo53", getEtcdWithGCS, validateStoreGCPForETCDApp, validateStoreGCPForCmpctJob), + Entry("if the store is S3, the statefulset and compaction job should reflect the spec changes", "foo54", getEtcdWithS3, validateStoreAWSForETCDApp, validateStoreAWSForCmpctJob), + Entry("if the store is ABS, the statefulset and compaction job should reflect the spec changes", "foo55", getEtcdWithABS, validateStoreAzureForETCDApp, validateStoreAzureForCmpctJob), + Entry("if the store is Swift, the statefulset and compaction job should reflect the spec changes", "foo56", getEtcdWithSwift, validateStoreOpenstackForETCDApp, validateStoreOpenstackForCmpctJob), + Entry("if the store is OSS, the statefulset and compaction job should reflect the spec changes", "foo57", getEtcdWithOSS, validateStoreAlicloudForETCDApp, validateStoreAlicloudForCmpctJob), ) - DescribeTable("when etcd resource is created with backupCompactionSchedule field", - func(name string, generateEtcd func(string, string) *druidv1alpha1.Etcd, validate func(*appsv1.StatefulSet, *corev1.ConfigMap, *corev1.Service, *batchv1.CronJob, *druidv1alpha1.Etcd)) { + /*Context("when an existing cronjob is already present", func() { + It("should delete the existing cronjob", func() { var err error var instance *druidv1alpha1.Etcd var c client.Client var s *appsv1.StatefulSet var cm *corev1.ConfigMap var svc *corev1.Service - var cj *batchv1.CronJob + var j *batchv1.Job - instance = generateEtcd(name, "default") + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + + instance = getEtcd("foo49", "default", true) c = mgr.GetClient() ns := corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -633,6 +719,11 @@ var _ = Describe("Druid", func() { }, } + // Create Job + cj = createJob(getJobName(instance), instance.Namespace, instance.Spec.Labels) + Expect(c.Create(ctx, cj)).To(Succeed()) + Eventually(func() error { return jobIsCorrectlyReconciled(c, instance, cj) }, timeout, pollingInterval).Should(BeNil()) + _, err = controllerutil.CreateOrUpdate(context.TODO(), c, &ns, func() error { return nil }) Expect(err).To(Not(HaveOccurred())) @@ -643,17 +734,16 @@ var _ = Describe("Druid", func() { } err = c.Create(context.TODO(), instance) Expect(err).NotTo(HaveOccurred()) + s = &appsv1.StatefulSet{} Eventually(func() error { return statefulsetIsCorrectlyReconciled(c, instance, s) }, timeout, pollingInterval).Should(BeNil()) cm = &corev1.ConfigMap{} Eventually(func() error { return configMapIsCorrectlyReconciled(c, instance, cm) }, timeout, pollingInterval).Should(BeNil()) svc = &corev1.Service{} Eventually(func() error { return serviceIsCorrectlyReconciled(c, instance, svc) }, timeout, pollingInterval).Should(BeNil()) - cj = &batchv1.CronJob{} - Eventually(func() error { return cronJobIsCorrectlyReconciled(c, instance, cj) }, timeout, pollingInterval).Should(BeNil()) - - //validate(s, cm, svc, instance) - validate(s, cm, svc, cj, instance) + //Cronjob should not exist + cj = &batchv1.Job{} + Eventually(func() error { return jobRemoved(c, cj) }, timeout, pollingInterval).Should(BeNil()) setStatefulSetReady(s) err = c.Status().Update(context.TODO(), s) @@ -662,120 +752,8 @@ var _ = Describe("Druid", func() { Expect(c.Delete(context.TODO(), instance)).To(Succeed()) Eventually(func() error { return statefulSetRemoved(c, s) }, timeout, pollingInterval).Should(BeNil()) Eventually(func() error { return etcdRemoved(c, instance) }, timeout, pollingInterval).Should(BeNil()) - }, - Entry("if fields are set in etcd.Spec and 
TLS enabled, the resources should reflect the spec changes", "foo42", getEtcdWithCmpctScheduleTLS, validateEtcdWithCronjob), - Entry("if the store is GCS, the statefulset and cronjob should reflect the spec changes", "foo43", getEtcdWithCmpctScheduleGCS, validateStoreGCPWithCronjob), - Entry("if the store is S3, the statefulset and cronjob should reflect the spec changes", "foo44", getEtcdWithCmpctScheduleS3, validateStoreAWSWithCronjob), - Entry("if the store is ABS, the statefulset and cronjob should reflect the spec changes", "foo45", getEtcdWithCmpctScheduleABS, validateStoreAzureWithCronjob), - Entry("if the store is Swift, the statefulset and cronjob should reflect the spec changes", "foo46", getEtcdWithCmpctScheduleSwift, validateStoreOpenstackWithCronjob), - Entry("if the store is OSS, the statefulset and cronjob should reflect the spec changes", "foo47", getEtcdWithCmpctScheduleOSS, validateStoreAlicloudWithCronjob), - ) - - Describe("with etcd resources without backupCompactionScheduled field", func() { - Context("when creating an etcd object", func() { - It("should not create a cronjob", func() { - var err error - var instance *druidv1alpha1.Etcd - var c client.Client - var s *appsv1.StatefulSet - var cm *corev1.ConfigMap - var svc *corev1.Service - var cj *batchv1.CronJob - - instance = getEtcd("foo48", "default", true) - c = mgr.GetClient() - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: instance.Namespace, - }, - } - - _, err = controllerutil.CreateOrUpdate(context.TODO(), c, &ns, func() error { return nil }) - Expect(err).To(Not(HaveOccurred())) - - if instance.Spec.Backup.Store != nil && instance.Spec.Backup.Store.SecretRef != nil { - storeSecret := instance.Spec.Backup.Store.SecretRef.Name - errors := createSecrets(c, instance.Namespace, storeSecret) - Expect(len(errors)).Should(BeZero()) - } - err = c.Create(context.TODO(), instance) - Expect(err).NotTo(HaveOccurred()) - s = &appsv1.StatefulSet{} - Eventually(func() error { return statefulsetIsCorrectlyReconciled(c, instance, s) }, timeout, pollingInterval).Should(BeNil()) - cm = &corev1.ConfigMap{} - Eventually(func() error { return configMapIsCorrectlyReconciled(c, instance, cm) }, timeout, pollingInterval).Should(BeNil()) - svc = &corev1.Service{} - Eventually(func() error { return serviceIsCorrectlyReconciled(c, instance, svc) }, timeout, pollingInterval).Should(BeNil()) - cj = &batchv1.CronJob{} - Eventually(func() error { return cronJobIsCorrectlyReconciled(c, instance, cj) }, timeout, pollingInterval).ShouldNot(BeNil()) - - setStatefulSetReady(s) - err = c.Status().Update(context.TODO(), s) - Expect(err).NotTo(HaveOccurred()) - - Expect(c.Delete(context.TODO(), instance)).To(Succeed()) - Eventually(func() error { return statefulSetRemoved(c, s) }, timeout, pollingInterval).Should(BeNil()) - Eventually(func() error { return etcdRemoved(c, instance) }, timeout, pollingInterval).Should(BeNil()) - }) - }) - - Context("when an existing cronjob is already present", func() { - It("should delete the existing cronjob", func() { - var err error - var instance *druidv1alpha1.Etcd - var c client.Client - var s *appsv1.StatefulSet - var cm *corev1.ConfigMap - var svc *corev1.Service - var cj *batchv1.CronJob - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - instance = getEtcd("foo49", "default", true) - c = mgr.GetClient() - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: instance.Namespace, - }, - } - - // Create CronJob - cj = 
createCronJob(getCronJobName(instance), instance.Namespace, instance.Spec.Labels) - Expect(c.Create(ctx, cj)).To(Succeed()) - Eventually(func() error { return cronJobIsCorrectlyReconciled(c, instance, cj) }, timeout, pollingInterval).Should(BeNil()) - - _, err = controllerutil.CreateOrUpdate(context.TODO(), c, &ns, func() error { return nil }) - Expect(err).To(Not(HaveOccurred())) - - if instance.Spec.Backup.Store != nil && instance.Spec.Backup.Store.SecretRef != nil { - storeSecret := instance.Spec.Backup.Store.SecretRef.Name - errors := createSecrets(c, instance.Namespace, storeSecret) - Expect(len(errors)).Should(BeZero()) - } - err = c.Create(context.TODO(), instance) - Expect(err).NotTo(HaveOccurred()) - - s = &appsv1.StatefulSet{} - Eventually(func() error { return statefulsetIsCorrectlyReconciled(c, instance, s) }, timeout, pollingInterval).Should(BeNil()) - cm = &corev1.ConfigMap{} - Eventually(func() error { return configMapIsCorrectlyReconciled(c, instance, cm) }, timeout, pollingInterval).Should(BeNil()) - svc = &corev1.Service{} - Eventually(func() error { return serviceIsCorrectlyReconciled(c, instance, svc) }, timeout, pollingInterval).Should(BeNil()) - //Cronjob should not exist - cj = &batchv1.CronJob{} - Eventually(func() error { return cronJobRemoved(c, cj) }, timeout, pollingInterval).Should(BeNil()) - - setStatefulSetReady(s) - err = c.Status().Update(context.TODO(), s) - Expect(err).NotTo(HaveOccurred()) - - Expect(c.Delete(context.TODO(), instance)).To(Succeed()) - Eventually(func() error { return statefulSetRemoved(c, s) }, timeout, pollingInterval).Should(BeNil()) - Eventually(func() error { return etcdRemoved(c, instance) }, timeout, pollingInterval).Should(BeNil()) - }) }) - }) + })*/ }) @@ -799,17 +777,36 @@ func podDeleted(c client.Client, etcd *druidv1alpha1.Etcd) error { } -func validateEtcdWithCronjob(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, cj *batchv1.CronJob, instance *druidv1alpha1.Etcd) { - validateEtcd(s, cm, svc, instance) +func validateEtcdApp(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { + // Validate Quota + configYML := cm.Data[etcdConfig] + config := map[string]interface{}{} + err := yaml.Unmarshal([]byte(configYML), &config) + Expect(err).NotTo(HaveOccurred()) + Expect(instance.Spec.Etcd.Quota).NotTo(BeNil()) + Expect(config).To(HaveKeyWithValue(quotaKey, float64(instance.Spec.Etcd.Quota.Value()))) + + // Validate Metrics MetricsLevel + Expect(instance.Spec.Etcd.Metrics).NotTo(BeNil()) + Expect(config).To(HaveKeyWithValue(metricsKey, string(*instance.Spec.Etcd.Metrics))) + + // Validate DefragmentationSchedule *string + Expect(instance.Spec.Etcd.DefragmentationSchedule).NotTo(BeNil()) + + // Validate Image + Expect(instance.Spec.Etcd.Image).NotTo(BeNil()) + + // Validate Resources + Expect(instance.Spec.Etcd.Resources).NotTo(BeNil()) store, err := utils.StorageProviderFromInfraProvider(instance.Spec.Backup.Store.Provider) Expect(err).NotTo(HaveOccurred()) - Expect(*cj).To(MatchFields(IgnoreExtras, Fields{ + Expect(*cm).To(MatchFields(IgnoreExtras, Fields{ "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(getCronJobName(instance)), + "Name": Equal(fmt.Sprintf("etcd-bootstrap-%s", string(instance.UID[:6]))), "Namespace": Equal(instance.Namespace), - "Labels": MatchKeys(IgnoreExtras, Keys{ + "Labels": MatchAllKeys(Keys{ "name": Equal("etcd"), "instance": Equal(instance.Name), }), @@ -824,505 +821,47 @@ func validateEtcdWithCronjob(s *appsv1.StatefulSet, cm 
*corev1.ConfigMap, svc *c }), }), }), - "Spec": MatchFields(IgnoreExtras, Fields{ - "ConcurrencyPolicy": Equal(batchv1.ForbidConcurrent), - "JobTemplate": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "BackoffLimit": PointTo(Equal(int32(0))), - "Template": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "RestartPolicy": Equal(corev1.RestartPolicyNever), - "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - "compact-backup": MatchFields(IgnoreExtras, Fields{ - "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--data-dir=/var/etcd/data": Equal("--data-dir=/var/etcd/data"), - "--snapstore-temp-directory=/var/etcd/data/tmp": Equal("--snapstore-temp-directory=/var/etcd/data/tmp"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("%s=%s", "--storage-provider", store): Equal(fmt.Sprintf("%s=%s", "--storage-provider", store)), - fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), - fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value())): Equal(fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value()))), - fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String())), - fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String())), - }), - "Ports": ConsistOf([]corev1.ContainerPort{ - corev1.ContainerPort{ - Name: "server", - Protocol: corev1.ProtocolTCP, - HostPort: 0, - ContainerPort: backupPort, - }, - }), - //"Image": Equal(*instance.Spec.Backup.Image), - "ImagePullPolicy": Equal(corev1.PullIfNotPresent), - "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ - "etcd-config-file": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-config-file"), - "MountPath": Equal("/var/etcd/config/"), - }), - "etcd-workspace-dir": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-workspace-dir"), - "MountPath": Equal("/var/etcd/data"), - }), - }), - "Env": MatchElements(envIterator, IgnoreExtras, Elements{ - "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), - }), - }), - }), - }), - "Volumes": MatchAllElements(volumeIterator, Elements{ - "etcd-config-file": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-config-file"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - "ConfigMap": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(fmt.Sprintf("etcd-bootstrap-%s", string(instance.UID[:6]))), - }), - "DefaultMode": PointTo(Equal(int32(0644))), - "Items": MatchAllElements(keyIterator, Elements{ - "etcd.conf.yaml": MatchFields(IgnoreExtras, Fields{ - "Key": Equal("etcd.conf.yaml"), - "Path": Equal("etcd.conf.yaml"), - }), - }), - })), - }), - }), - "etcd-workspace-dir": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-workspace-dir"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - 
"HostPath": BeNil(), - "EmptyDir": PointTo(MatchFields(IgnoreExtras, Fields{ - "SizeLimit": BeNil(), - })), - }), - }), - }), - }), - }), - }), - }), - }), })) -} -func validateStoreGCPWithCronjob(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, cj *batchv1.CronJob, instance *druidv1alpha1.Etcd) { - validateStoreGCP(s, cm, svc, instance) + Expect(config).To(MatchKeys(IgnoreExtras, Keys{ + "name": Equal(fmt.Sprintf("etcd-%s", instance.UID[:6])), + "data-dir": Equal("/var/etcd/data/new.etcd"), + "metrics": Equal(string(*instance.Spec.Etcd.Metrics)), + "snapshot-count": Equal(float64(75000)), + "enable-v2": Equal(false), + "quota-backend-bytes": Equal(float64(instance.Spec.Etcd.Quota.Value())), + "listen-client-urls": Equal(fmt.Sprintf("https://0.0.0.0:%d", *instance.Spec.Etcd.ClientPort)), + "advertise-client-urls": Equal(fmt.Sprintf("https://0.0.0.0:%d", *instance.Spec.Etcd.ClientPort)), + "initial-cluster-token": Equal("initial"), + "initial-cluster-state": Equal("new"), + "auto-compaction-mode": Equal(string(*instance.Spec.Common.AutoCompactionMode)), + "auto-compaction-retention": Equal(*instance.Spec.Common.AutoCompactionRetention), - Expect(*cj).To(MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "JobTemplate": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Template": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - "compact-backup": MatchFields(IgnoreExtras, Fields{ - "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=GCS": Equal("--storage-provider=GCS"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), - }), - "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ - "etcd-backup": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-backup"), - "MountPath": Equal("/root/.gcp/"), - }), - "etcd-config-file": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-config-file"), - "MountPath": Equal("/var/etcd/config/"), - }), - }), - "Env": MatchAllElements(envIterator, Elements{ - "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), - }), - "GOOGLE_APPLICATION_CREDENTIALS": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("GOOGLE_APPLICATION_CREDENTIALS"), - "Value": Equal("/root/.gcp/serviceaccount.json"), - }), - }), - }), - }), - "Volumes": MatchElements(volumeIterator, IgnoreExtras, Elements{ - "etcd-backup": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-backup"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretName": Equal(instance.Spec.Backup.Store.SecretRef.Name), - })), - }), - }), - "etcd-config-file": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-config-file"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - "ConfigMap": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(fmt.Sprintf("etcd-bootstrap-%s", string(instance.UID[:6]))), - }), - "DefaultMode": PointTo(Equal(int32(0644))), - "Items": 
MatchAllElements(keyIterator, Elements{ - "etcd.conf.yaml": MatchFields(IgnoreExtras, Fields{ - "Key": Equal("etcd.conf.yaml"), - "Path": Equal("etcd.conf.yaml"), - }), - }), - })), - }), - }), - }), - }), - }), - }), - }), + "client-transport-security": MatchKeys(IgnoreExtras, Keys{ + "cert-file": Equal("/var/etcd/ssl/server/tls.crt"), + "key-file": Equal("/var/etcd/ssl/server/tls.key"), + "client-cert-auth": Equal(true), + "trusted-ca-file": Equal("/var/etcd/ssl/ca/ca.crt"), + "auto-tls": Equal(false), }), })) -} - -func validateStoreAWSWithCronjob(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, cj *batchv1.CronJob, instance *druidv1alpha1.Etcd) { - validateStoreAWS(s, cm, svc, instance) - Expect(*cj).To(MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "JobTemplate": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Template": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - "compact-backup": MatchFields(IgnoreExtras, Fields{ - "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=S3": Equal("--storage-provider=S3"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), - }), - "Env": MatchAllElements(envIterator, Elements{ - "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), - }), - "AWS_REGION": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("AWS_REGION"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("region"), - })), - })), - }), - "AWS_SECRET_ACCESS_KEY": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("AWS_SECRET_ACCESS_KEY"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("secretAccessKey"), - })), - })), - }), - "AWS_ACCESS_KEY_ID": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("AWS_ACCESS_KEY_ID"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("accessKeyID"), - })), - })), - }), - }), - }), - }), - }), - }), - }), - }), - }), - })) -} - -func validateStoreAzureWithCronjob(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, cj *batchv1.CronJob, instance *druidv1alpha1.Etcd) { - validateStoreAzure(s, cm, svc, instance) - - Expect(*cj).To(MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "JobTemplate": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Template": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Containers": MatchElements(containerIterator, 
IgnoreExtras, Elements{ - "compact-backup": MatchFields(IgnoreExtras, Fields{ - "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=ABS": Equal("--storage-provider=ABS"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), - }), - "Env": MatchAllElements(envIterator, Elements{ - "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), - }), - "STORAGE_ACCOUNT": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_ACCOUNT"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("storageAccount"), - })), - })), - }), - "STORAGE_KEY": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_KEY"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("storageKey"), - })), - })), - }), - }), - }), - }), - }), - }), - }), - }), - }), - })) -} - -func validateStoreOpenstackWithCronjob(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, cj *batchv1.CronJob, instance *druidv1alpha1.Etcd) { - validateStoreOpenstack(s, cm, svc, instance) - - Expect(*cj).To(MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "JobTemplate": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Template": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - "compact-backup": MatchFields(IgnoreExtras, Fields{ - "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=Swift": Equal("--storage-provider=Swift"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), - }), - "Env": MatchAllElements(envIterator, Elements{ - "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), - }), - "OS_AUTH_URL": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OS_AUTH_URL"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("authURL"), - })), - })), - }), - "OS_USERNAME": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OS_USERNAME"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), 
- }), - "Key": Equal("username"), - })), - })), - }), - "OS_TENANT_NAME": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OS_TENANT_NAME"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("tenantName"), - })), - })), - }), - "OS_PASSWORD": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OS_PASSWORD"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("password"), - })), - })), - }), - "OS_DOMAIN_NAME": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OS_DOMAIN_NAME"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("domainName"), - })), - })), - }), - }), - }), - }), - }), - }), - }), - }), - }), - })) -} - -func validateStoreAlicloudWithCronjob(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, cj *batchv1.CronJob, instance *druidv1alpha1.Etcd) { - validateStoreAlicloud(s, cm, svc, instance) - - Expect(*cj).To(MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "JobTemplate": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Template": MatchFields(IgnoreExtras, Fields{ - "Spec": MatchFields(IgnoreExtras, Fields{ - "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - "compact-backup": MatchFields(IgnoreExtras, Fields{ - "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=OSS": Equal("--storage-provider=OSS"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), - }), - "ImagePullPolicy": Equal(corev1.PullIfNotPresent), - "Env": MatchAllElements(envIterator, Elements{ - "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), - }), - "ALICLOUD_ENDPOINT": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("ALICLOUD_ENDPOINT"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("storageEndpoint"), - })), - })), - }), - "ALICLOUD_ACCESS_KEY_SECRET": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("ALICLOUD_ACCESS_KEY_SECRET"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("accessKeySecret"), - })), - })), - }), - "ALICLOUD_ACCESS_KEY_ID": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("ALICLOUD_ACCESS_KEY_ID"), - "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ 
- "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ - "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), - }), - "Key": Equal("accessKeyID"), - })), - })), - }), - }), - }), - }), - }), - }), - }), - }), - }), - })) -} - -func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, instance *druidv1alpha1.Etcd) { - // Validate Quota - configYML := cm.Data[etcdConfig] - config := map[string]string{} - err := yaml.Unmarshal([]byte(configYML), &config) - Expect(err).NotTo(HaveOccurred()) - Expect(instance.Spec.Etcd.Quota).To(BeNil()) - Expect(config).To(HaveKeyWithValue(quotaKey, fmt.Sprintf("%d", int64(quota.Value())))) - - // Validate Metrics MetricsLevel - Expect(instance.Spec.Etcd.Metrics).To(BeNil()) - Expect(config).To(HaveKeyWithValue(metricsKey, string(druidv1alpha1.Basic))) - - // Validate DefragmentationSchedule *string - Expect(instance.Spec.Etcd.DefragmentationSchedule).To(BeNil()) - - // Validate ServerPort and ClientPort - Expect(instance.Spec.Etcd.ServerPort).To(BeNil()) - Expect(instance.Spec.Etcd.ClientPort).To(BeNil()) - - Expect(instance.Spec.Etcd.Image).To(BeNil()) - imageVector, err := imagevector.ReadGlobalImageVectorWithEnvOverride(getImageYAMLPath()) - Expect(err).NotTo(HaveOccurred()) - images, err := imagevector.FindImages(imageVector, imageNames) - Expect(err).NotTo(HaveOccurred()) - - // Validate Resources - // resources: - // limits: - // cpu: 100m - // memory: 512Gi - // requests: - // cpu: 50m - // memory: 128Mi - Expect(instance.Spec.Etcd.Resources).To(BeNil()) - - // Validate TLS. Ensure that enableTLS flag is not triggered in the go-template - Expect(instance.Spec.Etcd.TLS).To(BeNil()) - - Expect(config).To(MatchKeys(IgnoreExtras, Keys{ - "name": Equal(fmt.Sprintf("etcd-%s", instance.UID[:6])), - "data-dir": Equal("/var/etcd/data/new.etcd"), - "metrics": Equal(string(druidv1alpha1.Basic)), - "snapshot-count": Equal("75000"), - "enable-v2": Equal("false"), - "quota-backend-bytes": Equal("8589934592"), - "listen-client-urls": Equal(fmt.Sprintf("http://0.0.0.0:%d", clientPort)), - "advertise-client-urls": Equal(fmt.Sprintf("http://0.0.0.0:%d", clientPort)), - "initial-cluster-token": Equal("initial"), - "initial-cluster-state": Equal("new"), - "auto-compaction-mode": Equal(string(druidv1alpha1.Periodic)), - "auto-compaction-retention": Equal(DefaultAutoCompactionRetention), - })) - - Expect(*svc).To(MatchFields(IgnoreExtras, Fields{ - "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(fmt.Sprintf("%s-client", instance.Name)), - "Namespace": Equal(instance.Namespace), - "Labels": MatchAllKeys(Keys{ - "name": Equal("etcd"), - "instance": Equal(instance.Name), - }), - "OwnerReferences": MatchElements(ownerRefIterator, IgnoreExtras, Elements{ - instance.Name: MatchFields(IgnoreExtras, Fields{ - "APIVersion": Equal("druid.gardener.cloud/v1alpha1"), - "Kind": Equal("Etcd"), - "Name": Equal(instance.Name), - "UID": Equal(instance.UID), - "Controller": PointTo(Equal(true)), - "BlockOwnerDeletion": PointTo(Equal(true)), + Expect(*svc).To(MatchFields(IgnoreExtras, Fields{ + "ObjectMeta": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(fmt.Sprintf("%s-client", instance.Name)), + "Namespace": Equal(instance.Namespace), + "Labels": MatchAllKeys(Keys{ + "name": Equal("etcd"), + "instance": Equal(instance.Name), + }), + "OwnerReferences": MatchElements(ownerRefIterator, IgnoreExtras, Elements{ + instance.Name: MatchFields(IgnoreExtras, Fields{ + 
"APIVersion": Equal("druid.gardener.cloud/v1alpha1"), + "Kind": Equal("Etcd"), + "Name": Equal(instance.Name), + "UID": Equal(instance.UID), + "Controller": PointTo(Equal(true)), + "BlockOwnerDeletion": PointTo(Equal(true)), }), }), }), @@ -1337,25 +876,25 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "client": MatchFields(IgnoreExtras, Fields{ "Name": Equal("client"), "Protocol": Equal(corev1.ProtocolTCP), - "Port": Equal(clientPort), + "Port": Equal(*instance.Spec.Etcd.ClientPort), "TargetPort": MatchFields(IgnoreExtras, Fields{ - "IntVal": Equal(clientPort), + "IntVal": Equal(*instance.Spec.Etcd.ClientPort), }), }), "server": MatchFields(IgnoreExtras, Fields{ "Name": Equal("server"), "Protocol": Equal(corev1.ProtocolTCP), - "Port": Equal(serverPort), + "Port": Equal(*instance.Spec.Etcd.ServerPort), "TargetPort": MatchFields(IgnoreExtras, Fields{ - "IntVal": Equal(serverPort), + "IntVal": Equal(*instance.Spec.Etcd.ServerPort), }), }), "backuprestore": MatchFields(IgnoreExtras, Fields{ "Name": Equal("backuprestore"), "Protocol": Equal(corev1.ProtocolTCP), - "Port": Equal(backupPort), + "Port": Equal(*instance.Spec.Backup.Port), "TargetPort": MatchFields(IgnoreExtras, Fields{ - "IntVal": Equal(backupPort), + "IntVal": Equal(*instance.Spec.Backup.Port), }), }), }), @@ -1378,6 +917,7 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "instance": Equal(instance.Name), }), }), + "Spec": MatchFields(IgnoreExtras, Fields{ "UpdateStrategy": MatchFields(IgnoreExtras, Fields{ "Type": Equal(appsv1.RollingUpdateStatefulSetStrategyType), @@ -1402,6 +942,7 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "instance": Equal(instance.Name), }), }), + //s.Spec.Template.Spec.HostAliases "Spec": MatchFields(IgnoreExtras, Fields{ "HostAliases": MatchAllElements(hostAliasIterator, Elements{ "127.0.0.1": MatchFields(IgnoreExtras, Fields{ @@ -1411,36 +952,36 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * }), }), }), - "PriorityClassName": Equal(""), + "PriorityClassName": Equal(*instance.Spec.PriorityClassName), "Containers": MatchAllElements(containerIterator, Elements{ common.Etcd: MatchFields(IgnoreExtras, Fields{ "Ports": ConsistOf([]corev1.ContainerPort{ - corev1.ContainerPort{ + { Name: "server", Protocol: corev1.ProtocolTCP, HostPort: 0, - ContainerPort: serverPort, + ContainerPort: *instance.Spec.Etcd.ServerPort, }, - corev1.ContainerPort{ + { Name: "client", Protocol: corev1.ProtocolTCP, HostPort: 0, - ContainerPort: clientPort, + ContainerPort: *instance.Spec.Etcd.ClientPort, }, }), "Command": MatchAllElements(cmdIterator, Elements{ "/var/etcd/bin/bootstrap.sh": Equal("/var/etcd/bin/bootstrap.sh"), }), "ImagePullPolicy": Equal(corev1.PullIfNotPresent), - "Image": Equal(fmt.Sprintf("%s:%s", images[common.Etcd].Repository, *images[common.Etcd].Tag)), + "Image": Equal(*instance.Spec.Etcd.Image), "Resources": MatchFields(IgnoreExtras, Fields{ "Requests": MatchKeys(IgnoreExtras, Keys{ - corev1.ResourceCPU: Equal(resource.MustParse("50m")), - corev1.ResourceMemory: Equal(resource.MustParse("128Mi")), + corev1.ResourceCPU: Equal(instance.Spec.Etcd.Resources.Requests[corev1.ResourceCPU]), + corev1.ResourceMemory: Equal(instance.Spec.Etcd.Resources.Requests[corev1.ResourceMemory]), }), "Limits": MatchKeys(IgnoreExtras, Keys{ - corev1.ResourceCPU: Equal(resource.MustParse("100m")), - corev1.ResourceMemory: Equal(resource.MustParse("512Gi")), + corev1.ResourceCPU: 
Equal(instance.Spec.Etcd.Resources.Limits[corev1.ResourceCPU]), + corev1.ResourceMemory: Equal(instance.Spec.Etcd.Resources.Limits[corev1.ResourceMemory]), }), }), "ReadinessProbe": PointTo(MatchFields(IgnoreExtras, Fields{ @@ -1450,7 +991,7 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "Port": MatchFields(IgnoreExtras, Fields{ "IntVal": Equal(int32(8080)), }), - "Scheme": Equal(corev1.URISchemeHTTP), + "Scheme": Equal(corev1.URISchemeHTTPS), })), }), "InitialDelaySeconds": Equal(int32(15)), @@ -1460,11 +1001,14 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "Handler": MatchFields(IgnoreExtras, Fields{ "Exec": PointTo(MatchFields(IgnoreExtras, Fields{ "Command": MatchAllElements(cmdIterator, Elements{ - "/bin/sh": Equal("/bin/sh"), - "-ec": Equal("-ec"), - "ETCDCTL_API=3": Equal("ETCDCTL_API=3"), - "etcdctl": Equal("etcdctl"), - fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort)), + "/bin/sh": Equal("/bin/sh"), + "-ec": Equal("-ec"), + "ETCDCTL_API=3": Equal("ETCDCTL_API=3"), + "etcdctl": Equal("etcdctl"), + "--cert=/var/etcd/ssl/client/tls.crt": Equal("--cert=/var/etcd/ssl/client/tls.crt"), + "--key=/var/etcd/ssl/client/tls.key": Equal("--key=/var/etcd/ssl/client/tls.key"), + "--cacert=/var/etcd/ssl/ca/ca.crt": Equal("--cacert=/var/etcd/ssl/ca/ca.crt"), + fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort)), "get": Equal("get"), "foo": Equal("foo"), }), @@ -1474,14 +1018,26 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "PeriodSeconds": Equal(int32(5)), })), "VolumeMounts": MatchAllElements(volumeMountIterator, Elements{ - instance.Name: MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Name), + *instance.Spec.VolumeClaimTemplate: MatchFields(IgnoreExtras, Fields{ + "Name": Equal(*instance.Spec.VolumeClaimTemplate), "MountPath": Equal("/var/etcd/data/"), }), "etcd-config-file": MatchFields(IgnoreExtras, Fields{ "Name": Equal("etcd-config-file"), "MountPath": Equal("/var/etcd/config/"), }), + "ca-etcd": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("ca-etcd"), + "MountPath": Equal("/var/etcd/ssl/ca"), + }), + "etcd-server-tls": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-server-tls"), + "MountPath": Equal("/var/etcd/ssl/server"), + }), + "etcd-client-tls": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-client-tls"), + "MountPath": Equal("/var/etcd/ssl/client"), + }), }), }), @@ -1489,45 +1045,68 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * "Command": MatchAllElements(cmdIterator, Elements{ "etcdbrctl": Equal("etcdbrctl"), "server": Equal("server"), + "--cert=/var/etcd/ssl/client/tls.crt": Equal("--cert=/var/etcd/ssl/client/tls.crt"), + "--key=/var/etcd/ssl/client/tls.key": Equal("--key=/var/etcd/ssl/client/tls.key"), + "--cacert=/var/etcd/ssl/ca/ca.crt": Equal("--cacert=/var/etcd/ssl/ca/ca.crt"), + "--server-cert=/var/etcd/ssl/server/tls.crt": Equal("--server-cert=/var/etcd/ssl/server/tls.crt"), + "--server-key=/var/etcd/ssl/server/tls.key": Equal("--server-key=/var/etcd/ssl/server/tls.key"), "--data-dir=/var/etcd/data/new.etcd": Equal("--data-dir=/var/etcd/data/new.etcd"), - "--insecure-transport=true": Equal("--insecure-transport=true"), - "--insecure-skip-tls-verify=true": 
Equal("--insecure-skip-tls-verify=true"), - "--etcd-connection-timeout=5m": Equal("--etcd-connection-timeout=5m"), + "--insecure-transport=false": Equal("--insecure-transport=false"), + "--insecure-skip-tls-verify=false": Equal("--insecure-skip-tls-verify=false"), "--snapstore-temp-directory=/var/etcd/data/temp": Equal("--snapstore-temp-directory=/var/etcd/data/temp"), - fmt.Sprintf("--delta-snapshot-memory-limit=%d", deltaSnapShotMemLimit.Value()): Equal(fmt.Sprintf("--delta-snapshot-memory-limit=%d", deltaSnapShotMemLimit.Value())), - fmt.Sprintf("--garbage-collection-policy=%s", druidv1alpha1.GarbageCollectionPolicyLimitBased): Equal(fmt.Sprintf("--garbage-collection-policy=%s", druidv1alpha1.GarbageCollectionPolicyLimitBased)), - fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort)), - fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(quota.Value())): Equal(fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(quota.Value()))), - fmt.Sprintf("--max-backups=%d", maxBackups): Equal(fmt.Sprintf("--max-backups=%d", maxBackups)), - fmt.Sprintf("--auto-compaction-mode=%s", druidv1alpha1.Periodic): Equal(fmt.Sprintf("--auto-compaction-mode=%s", druidv1alpha1.Periodic)), - fmt.Sprintf("--auto-compaction-retention=%s", DefaultAutoCompactionRetention): Equal(fmt.Sprintf("--auto-compaction-retention=%s", DefaultAutoCompactionRetention)), - fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", "8m"): Equal(fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", "8m")), - fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", "8m"): Equal(fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", "8m")), + "--etcd-connection-timeout=5m": Equal("--etcd-connection-timeout=5m"), + fmt.Sprintf("--defragmentation-schedule=%s", *instance.Spec.Etcd.DefragmentationSchedule): Equal(fmt.Sprintf("--defragmentation-schedule=%s", *instance.Spec.Etcd.DefragmentationSchedule)), + fmt.Sprintf("--schedule=%s", *instance.Spec.Backup.FullSnapshotSchedule): Equal(fmt.Sprintf("--schedule=%s", *instance.Spec.Backup.FullSnapshotSchedule)), + fmt.Sprintf("%s=%s", "--garbage-collection-policy", *instance.Spec.Backup.GarbageCollectionPolicy): Equal(fmt.Sprintf("%s=%s", "--garbage-collection-policy", *instance.Spec.Backup.GarbageCollectionPolicy)), + fmt.Sprintf("%s=%s", "--storage-provider", store): Equal(fmt.Sprintf("%s=%s", "--storage-provider", store)), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("--delta-snapshot-memory-limit=%d", instance.Spec.Backup.DeltaSnapshotMemoryLimit.Value()): Equal(fmt.Sprintf("--delta-snapshot-memory-limit=%d", instance.Spec.Backup.DeltaSnapshotMemoryLimit.Value())), + fmt.Sprintf("--garbage-collection-policy=%s", *instance.Spec.Backup.GarbageCollectionPolicy): Equal(fmt.Sprintf("--garbage-collection-policy=%s", *instance.Spec.Backup.GarbageCollectionPolicy)), + fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort)), + fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value())): Equal(fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value()))), + fmt.Sprintf("%s=%s", "--delta-snapshot-period", instance.Spec.Backup.DeltaSnapshotPeriod.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--delta-snapshot-period", 
instance.Spec.Backup.DeltaSnapshotPeriod.Duration.String())), + fmt.Sprintf("%s=%s", "--garbage-collection-period", instance.Spec.Backup.GarbageCollectionPeriod.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--garbage-collection-period", instance.Spec.Backup.GarbageCollectionPeriod.Duration.String())), + fmt.Sprintf("%s=%s", "--auto-compaction-mode", *instance.Spec.Common.AutoCompactionMode): Equal(fmt.Sprintf("%s=%s", "--auto-compaction-mode", autoCompactionMode)), + fmt.Sprintf("%s=%s", "--auto-compaction-retention", *instance.Spec.Common.AutoCompactionRetention): Equal(fmt.Sprintf("%s=%s", "--auto-compaction-retention", autoCompactionRetention)), + fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String())), + fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String())), }), "Ports": ConsistOf([]corev1.ContainerPort{ - corev1.ContainerPort{ + { Name: "server", Protocol: corev1.ProtocolTCP, HostPort: 0, ContainerPort: backupPort, }, }), - "Image": Equal(fmt.Sprintf("%s:%s", images[common.BackupRestore].Repository, *images[common.BackupRestore].Tag)), + "Image": Equal(*instance.Spec.Backup.Image), "ImagePullPolicy": Equal(corev1.PullIfNotPresent), - "VolumeMounts": MatchAllElements(volumeMountIterator, Elements{ - instance.Name: MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Name), + "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ + *instance.Spec.VolumeClaimTemplate: MatchFields(IgnoreExtras, Fields{ + "Name": Equal(*instance.Spec.VolumeClaimTemplate), "MountPath": Equal("/var/etcd/data"), }), "etcd-config-file": MatchFields(IgnoreExtras, Fields{ "Name": Equal("etcd-config-file"), "MountPath": Equal("/var/etcd/config/"), }), + "ca-etcd": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("ca-etcd"), + "MountPath": Equal("/var/etcd/ssl/ca"), + }), + "etcd-server-tls": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-server-tls"), + "MountPath": Equal("/var/etcd/ssl/server"), + }), + "etcd-client-tls": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-client-tls"), + "MountPath": Equal("/var/etcd/ssl/client"), + }), }), - "Env": MatchAllElements(envIterator, Elements{ + "Env": MatchElements(envIterator, IgnoreExtras, Elements{ "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(""), + "Value": Equal(*instance.Spec.Backup.Store.Container), }), }), }), @@ -1550,21 +1129,46 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * })), }), }), - }), - }), - }), + "etcd-server-tls": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-server-tls"), + "VolumeSource": MatchFields(IgnoreExtras, Fields{ + "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretName": Equal(instance.Spec.Etcd.TLS.ServerTLSSecretRef.Name), + })), + }), + }), + "etcd-client-tls": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-client-tls"), + "VolumeSource": MatchFields(IgnoreExtras, Fields{ + "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretName": Equal(instance.Spec.Etcd.TLS.ClientTLSSecretRef.Name), + })), + }), + }), + "ca-etcd": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("ca-etcd"), + "VolumeSource": MatchFields(IgnoreExtras, Fields{ + 
"Secret": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretName": Equal(instance.Spec.Etcd.TLS.TLSCASecretRef.Name), + })), + }), + }), + }), + }), + }), "VolumeClaimTemplates": MatchAllElements(pvcIterator, Elements{ - instance.Name: MatchFields(IgnoreExtras, Fields{ + *instance.Spec.VolumeClaimTemplate: MatchFields(IgnoreExtras, Fields{ "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(instance.Name), + "Name": Equal(*instance.Spec.VolumeClaimTemplate), }), "Spec": MatchFields(IgnoreExtras, Fields{ + "StorageClassName": PointTo(Equal(*instance.Spec.StorageClass)), "AccessModes": MatchAllElements(accessModeIterator, Elements{ "ReadWriteOnce": Equal(corev1.ReadWriteOnce), }), "Resources": MatchFields(IgnoreExtras, Fields{ "Requests": MatchKeys(IgnoreExtras, Keys{ - corev1.ResourceStorage: Equal(defaultStorageCapacity), + corev1.ResourceStorage: Equal(*instance.Spec.StorageCapacity), }), }), }), @@ -1572,43 +1176,16 @@ func validateEtcdWithDefaults(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc * }), }), })) - } -func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, instance *druidv1alpha1.Etcd) { - - // Validate Quota - configYML := cm.Data[etcdConfig] - config := map[string]interface{}{} - err := yaml.Unmarshal([]byte(configYML), &config) - Expect(err).NotTo(HaveOccurred()) - Expect(instance.Spec.Etcd.Quota).NotTo(BeNil()) - Expect(config).To(HaveKeyWithValue(quotaKey, float64(instance.Spec.Etcd.Quota.Value()))) - - // Validate Metrics MetricsLevel - Expect(instance.Spec.Etcd.Metrics).NotTo(BeNil()) - Expect(config).To(HaveKeyWithValue(metricsKey, string(*instance.Spec.Etcd.Metrics))) - - // Validate DefragmentationSchedule *string - Expect(instance.Spec.Etcd.DefragmentationSchedule).NotTo(BeNil()) - - // Validate Image - Expect(instance.Spec.Etcd.Image).NotTo(BeNil()) - - // Validate Resources - Expect(instance.Spec.Etcd.Resources).NotTo(BeNil()) - +func validateEtcdForCmpctJob(instance *druidv1alpha1.Etcd, j *batchv1.Job) { store, err := utils.StorageProviderFromInfraProvider(instance.Spec.Backup.Store.Provider) Expect(err).NotTo(HaveOccurred()) - Expect(*cm).To(MatchFields(IgnoreExtras, Fields{ + Expect(*j).To(MatchFields(IgnoreExtras, Fields{ "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(fmt.Sprintf("etcd-bootstrap-%s", string(instance.UID[:6]))), + "Name": Equal(getJobName(instance)), "Namespace": Equal(instance.Namespace), - "Labels": MatchAllKeys(Keys{ - "name": Equal("etcd"), - "instance": Equal(instance.Name), - }), "OwnerReferences": MatchElements(ownerRefIterator, IgnoreExtras, Elements{ instance.Name: MatchFields(IgnoreExtras, Fields{ "APIVersion": Equal("druid.gardener.cloud/v1alpha1"), @@ -1620,29 +1197,155 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi }), }), }), + "Spec": MatchFields(IgnoreExtras, Fields{ + "BackoffLimit": PointTo(Equal(int32(0))), + "Template": MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "RestartPolicy": Equal(corev1.RestartPolicyNever), + "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ + "compact-backup": MatchFields(IgnoreExtras, Fields{ + "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ + "--data-dir=/var/etcd/data": Equal("--data-dir=/var/etcd/data"), + "--snapstore-temp-directory=/var/etcd/data/tmp": Equal("--snapstore-temp-directory=/var/etcd/data/tmp"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", 
instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--storage-provider", store): Equal(fmt.Sprintf("%s=%s", "--storage-provider", store)), + fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), + fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value())): Equal(fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value()))), + fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String())), + fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String())), + }), + "Image": Equal(*instance.Spec.Backup.Image), + "ImagePullPolicy": Equal(corev1.PullIfNotPresent), + "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ + "etcd-workspace-dir": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-workspace-dir"), + "MountPath": Equal("/var/etcd/data"), + }), + }), + "Env": MatchElements(envIterator, IgnoreExtras, Elements{ + "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_CONTAINER"), + "Value": Equal(*instance.Spec.Backup.Store.Container), + }), + }), + }), + }), + "Volumes": MatchAllElements(volumeIterator, Elements{ + "etcd-workspace-dir": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-workspace-dir"), + "VolumeSource": MatchFields(IgnoreExtras, Fields{ + "HostPath": BeNil(), + "EmptyDir": PointTo(MatchFields(IgnoreExtras, Fields{ + "SizeLimit": BeNil(), + })), + }), + }), + }), + }), + }), + }), + })) +} + +func validateStoreGCPForETCDApp(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { + Expect(*s).To(MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Template": MatchFields(IgnoreExtras, Fields{ + //s.Spec.Template.Spec.HostAliases + "Spec": MatchFields(IgnoreExtras, Fields{ + "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ + backupRestore: MatchFields(IgnoreExtras, Fields{ + "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ + "--storage-provider=GCS": Equal("--storage-provider=GCS"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + }), + "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ + "etcd-backup": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-backup"), + "MountPath": Equal("/root/.gcp/"), + }), + }), + "Env": MatchAllElements(envIterator, Elements{ + "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_CONTAINER"), + "Value": Equal(*instance.Spec.Backup.Store.Container), + }), + "GOOGLE_APPLICATION_CREDENTIALS": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("GOOGLE_APPLICATION_CREDENTIALS"), + "Value": Equal("/root/.gcp/serviceaccount.json"), + }), + }), + }), + }), + "Volumes": MatchElements(volumeIterator, IgnoreExtras, Elements{ + "etcd-backup": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-backup"), + "VolumeSource": MatchFields(IgnoreExtras, Fields{ + "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretName": 
Equal(instance.Spec.Backup.Store.SecretRef.Name), + })), + }), + }), + }), + }), + }), + }), })) +} + +func validateEtcdAppWithDefaults(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { + // Validate Quota + configYML := cm.Data[etcdConfig] + config := map[string]string{} + err := yaml.Unmarshal([]byte(configYML), &config) + Expect(err).NotTo(HaveOccurred()) + Expect(instance.Spec.Etcd.Quota).To(BeNil()) + Expect(config).To(HaveKeyWithValue(quotaKey, fmt.Sprintf("%d", int64(quota.Value())))) + + // Validate Metrics MetricsLevel + Expect(instance.Spec.Etcd.Metrics).To(BeNil()) + Expect(config).To(HaveKeyWithValue(metricsKey, string(druidv1alpha1.Basic))) + + // Validate DefragmentationSchedule *string + Expect(instance.Spec.Etcd.DefragmentationSchedule).To(BeNil()) + + // Validate ServerPort and ClientPort + Expect(instance.Spec.Etcd.ServerPort).To(BeNil()) + Expect(instance.Spec.Etcd.ClientPort).To(BeNil()) + + Expect(instance.Spec.Etcd.Image).To(BeNil()) + imageVector, err := imagevector.ReadGlobalImageVectorWithEnvOverride(getImageYAMLPath()) + Expect(err).NotTo(HaveOccurred()) + images, err := imagevector.FindImages(imageVector, imageNames) + Expect(err).NotTo(HaveOccurred()) + + // Validate Resources + // resources: + // limits: + // cpu: 100m + // memory: 512Gi + // requests: + // cpu: 50m + // memory: 128Mi + Expect(instance.Spec.Etcd.Resources).To(BeNil()) + + // Validate TLS. Ensure that enableTLS flag is not triggered in the go-template + Expect(instance.Spec.Etcd.TLS).To(BeNil()) + Expect(config).To(MatchKeys(IgnoreExtras, Keys{ "name": Equal(fmt.Sprintf("etcd-%s", instance.UID[:6])), "data-dir": Equal("/var/etcd/data/new.etcd"), - "metrics": Equal(string(*instance.Spec.Etcd.Metrics)), - "snapshot-count": Equal(float64(75000)), - "enable-v2": Equal(false), - "quota-backend-bytes": Equal(float64(instance.Spec.Etcd.Quota.Value())), - "listen-client-urls": Equal(fmt.Sprintf("https://0.0.0.0:%d", *instance.Spec.Etcd.ClientPort)), - "advertise-client-urls": Equal(fmt.Sprintf("https://0.0.0.0:%d", *instance.Spec.Etcd.ClientPort)), + "metrics": Equal(string(druidv1alpha1.Basic)), + "snapshot-count": Equal("75000"), + "enable-v2": Equal("false"), + "quota-backend-bytes": Equal("8589934592"), + "listen-client-urls": Equal(fmt.Sprintf("http://0.0.0.0:%d", clientPort)), + "advertise-client-urls": Equal(fmt.Sprintf("http://0.0.0.0:%d", clientPort)), "initial-cluster-token": Equal("initial"), "initial-cluster-state": Equal("new"), - "auto-compaction-mode": Equal(string(*instance.Spec.Common.AutoCompactionMode)), - "auto-compaction-retention": Equal(*instance.Spec.Common.AutoCompactionRetention), - - "client-transport-security": MatchKeys(IgnoreExtras, Keys{ - "cert-file": Equal("/var/etcd/ssl/server/tls.crt"), - "key-file": Equal("/var/etcd/ssl/server/tls.key"), - "client-cert-auth": Equal(true), - "trusted-ca-file": Equal("/var/etcd/ssl/ca/ca.crt"), - "auto-tls": Equal(false), - }), + "auto-compaction-mode": Equal(string(druidv1alpha1.Periodic)), + "auto-compaction-retention": Equal(DefaultAutoCompactionRetention), })) Expect(*svc).To(MatchFields(IgnoreExtras, Fields{ @@ -1675,25 +1378,25 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "client": MatchFields(IgnoreExtras, Fields{ "Name": Equal("client"), "Protocol": Equal(corev1.ProtocolTCP), - "Port": Equal(*instance.Spec.Etcd.ClientPort), + "Port": Equal(clientPort), "TargetPort": MatchFields(IgnoreExtras, Fields{ - "IntVal": 
Equal(*instance.Spec.Etcd.ClientPort), + "IntVal": Equal(clientPort), }), }), "server": MatchFields(IgnoreExtras, Fields{ "Name": Equal("server"), "Protocol": Equal(corev1.ProtocolTCP), - "Port": Equal(*instance.Spec.Etcd.ServerPort), + "Port": Equal(serverPort), "TargetPort": MatchFields(IgnoreExtras, Fields{ - "IntVal": Equal(*instance.Spec.Etcd.ServerPort), + "IntVal": Equal(serverPort), }), }), "backuprestore": MatchFields(IgnoreExtras, Fields{ "Name": Equal("backuprestore"), "Protocol": Equal(corev1.ProtocolTCP), - "Port": Equal(*instance.Spec.Backup.Port), + "Port": Equal(backupPort), "TargetPort": MatchFields(IgnoreExtras, Fields{ - "IntVal": Equal(*instance.Spec.Backup.Port), + "IntVal": Equal(backupPort), }), }), }), @@ -1716,7 +1419,6 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "instance": Equal(instance.Name), }), }), - "Spec": MatchFields(IgnoreExtras, Fields{ "UpdateStrategy": MatchFields(IgnoreExtras, Fields{ "Type": Equal(appsv1.RollingUpdateStatefulSetStrategyType), @@ -1741,7 +1443,6 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "instance": Equal(instance.Name), }), }), - //s.Spec.Template.Spec.HostAliases "Spec": MatchFields(IgnoreExtras, Fields{ "HostAliases": MatchAllElements(hostAliasIterator, Elements{ "127.0.0.1": MatchFields(IgnoreExtras, Fields{ @@ -1751,36 +1452,36 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi }), }), }), - "PriorityClassName": Equal(*instance.Spec.PriorityClassName), + "PriorityClassName": Equal(""), "Containers": MatchAllElements(containerIterator, Elements{ common.Etcd: MatchFields(IgnoreExtras, Fields{ "Ports": ConsistOf([]corev1.ContainerPort{ - corev1.ContainerPort{ + { Name: "server", Protocol: corev1.ProtocolTCP, HostPort: 0, - ContainerPort: *instance.Spec.Etcd.ServerPort, + ContainerPort: serverPort, }, - corev1.ContainerPort{ + { Name: "client", Protocol: corev1.ProtocolTCP, HostPort: 0, - ContainerPort: *instance.Spec.Etcd.ClientPort, + ContainerPort: clientPort, }, }), "Command": MatchAllElements(cmdIterator, Elements{ "/var/etcd/bin/bootstrap.sh": Equal("/var/etcd/bin/bootstrap.sh"), }), "ImagePullPolicy": Equal(corev1.PullIfNotPresent), - "Image": Equal(*instance.Spec.Etcd.Image), + "Image": Equal(fmt.Sprintf("%s:%s", images[common.Etcd].Repository, *images[common.Etcd].Tag)), "Resources": MatchFields(IgnoreExtras, Fields{ "Requests": MatchKeys(IgnoreExtras, Keys{ - corev1.ResourceCPU: Equal(instance.Spec.Etcd.Resources.Requests[corev1.ResourceCPU]), - corev1.ResourceMemory: Equal(instance.Spec.Etcd.Resources.Requests[corev1.ResourceMemory]), + corev1.ResourceCPU: Equal(resource.MustParse("50m")), + corev1.ResourceMemory: Equal(resource.MustParse("128Mi")), }), "Limits": MatchKeys(IgnoreExtras, Keys{ - corev1.ResourceCPU: Equal(instance.Spec.Etcd.Resources.Limits[corev1.ResourceCPU]), - corev1.ResourceMemory: Equal(instance.Spec.Etcd.Resources.Limits[corev1.ResourceMemory]), + corev1.ResourceCPU: Equal(resource.MustParse("100m")), + corev1.ResourceMemory: Equal(resource.MustParse("512Gi")), }), }), "ReadinessProbe": PointTo(MatchFields(IgnoreExtras, Fields{ @@ -1790,7 +1491,7 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "Port": MatchFields(IgnoreExtras, Fields{ "IntVal": Equal(int32(8080)), }), - "Scheme": Equal(corev1.URISchemeHTTPS), + "Scheme": Equal(corev1.URISchemeHTTP), })), }), "InitialDelaySeconds": Equal(int32(15)), @@ -1800,14 +1501,11 @@ func validateEtcd(s 
*appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "Handler": MatchFields(IgnoreExtras, Fields{ "Exec": PointTo(MatchFields(IgnoreExtras, Fields{ "Command": MatchAllElements(cmdIterator, Elements{ - "/bin/sh": Equal("/bin/sh"), - "-ec": Equal("-ec"), - "ETCDCTL_API=3": Equal("ETCDCTL_API=3"), - "etcdctl": Equal("etcdctl"), - "--cert=/var/etcd/ssl/client/tls.crt": Equal("--cert=/var/etcd/ssl/client/tls.crt"), - "--key=/var/etcd/ssl/client/tls.key": Equal("--key=/var/etcd/ssl/client/tls.key"), - "--cacert=/var/etcd/ssl/ca/ca.crt": Equal("--cacert=/var/etcd/ssl/ca/ca.crt"), - fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort)), + "/bin/sh": Equal("/bin/sh"), + "-ec": Equal("-ec"), + "ETCDCTL_API=3": Equal("ETCDCTL_API=3"), + "etcdctl": Equal("etcdctl"), + fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort)), "get": Equal("get"), "foo": Equal("foo"), }), @@ -1817,26 +1515,14 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "PeriodSeconds": Equal(int32(5)), })), "VolumeMounts": MatchAllElements(volumeMountIterator, Elements{ - *instance.Spec.VolumeClaimTemplate: MatchFields(IgnoreExtras, Fields{ - "Name": Equal(*instance.Spec.VolumeClaimTemplate), + instance.Name: MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Name), "MountPath": Equal("/var/etcd/data/"), }), "etcd-config-file": MatchFields(IgnoreExtras, Fields{ "Name": Equal("etcd-config-file"), "MountPath": Equal("/var/etcd/config/"), }), - "ca-etcd": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("ca-etcd"), - "MountPath": Equal("/var/etcd/ssl/ca"), - }), - "etcd-server-tls": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-server-tls"), - "MountPath": Equal("/var/etcd/ssl/server"), - }), - "etcd-client-tls": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-client-tls"), - "MountPath": Equal("/var/etcd/ssl/client"), - }), }), }), @@ -1844,31 +1530,20 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi "Command": MatchAllElements(cmdIterator, Elements{ "etcdbrctl": Equal("etcdbrctl"), "server": Equal("server"), - "--cert=/var/etcd/ssl/client/tls.crt": Equal("--cert=/var/etcd/ssl/client/tls.crt"), - "--key=/var/etcd/ssl/client/tls.key": Equal("--key=/var/etcd/ssl/client/tls.key"), - "--cacert=/var/etcd/ssl/ca/ca.crt": Equal("--cacert=/var/etcd/ssl/ca/ca.crt"), - "--server-cert=/var/etcd/ssl/server/tls.crt": Equal("--server-cert=/var/etcd/ssl/server/tls.crt"), - "--server-key=/var/etcd/ssl/server/tls.key": Equal("--server-key=/var/etcd/ssl/server/tls.key"), "--data-dir=/var/etcd/data/new.etcd": Equal("--data-dir=/var/etcd/data/new.etcd"), - "--insecure-transport=false": Equal("--insecure-transport=false"), - "--insecure-skip-tls-verify=false": Equal("--insecure-skip-tls-verify=false"), - "--snapstore-temp-directory=/var/etcd/data/temp": Equal("--snapstore-temp-directory=/var/etcd/data/temp"), + "--insecure-transport=true": Equal("--insecure-transport=true"), + "--insecure-skip-tls-verify=true": Equal("--insecure-skip-tls-verify=true"), "--etcd-connection-timeout=5m": Equal("--etcd-connection-timeout=5m"), - fmt.Sprintf("--defragmentation-schedule=%s", *instance.Spec.Etcd.DefragmentationSchedule): Equal(fmt.Sprintf("--defragmentation-schedule=%s", *instance.Spec.Etcd.DefragmentationSchedule)), - fmt.Sprintf("--schedule=%s", 
*instance.Spec.Backup.FullSnapshotSchedule): Equal(fmt.Sprintf("--schedule=%s", *instance.Spec.Backup.FullSnapshotSchedule)), - fmt.Sprintf("%s=%s", "--garbage-collection-policy", *instance.Spec.Backup.GarbageCollectionPolicy): Equal(fmt.Sprintf("%s=%s", "--garbage-collection-policy", *instance.Spec.Backup.GarbageCollectionPolicy)), - fmt.Sprintf("%s=%s", "--storage-provider", store): Equal(fmt.Sprintf("%s=%s", "--storage-provider", store)), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), - fmt.Sprintf("--delta-snapshot-memory-limit=%d", instance.Spec.Backup.DeltaSnapshotMemoryLimit.Value()): Equal(fmt.Sprintf("--delta-snapshot-memory-limit=%d", instance.Spec.Backup.DeltaSnapshotMemoryLimit.Value())), - fmt.Sprintf("--garbage-collection-policy=%s", *instance.Spec.Backup.GarbageCollectionPolicy): Equal(fmt.Sprintf("--garbage-collection-policy=%s", *instance.Spec.Backup.GarbageCollectionPolicy)), - fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=https://%s-local:%d", instance.Name, clientPort)), - fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value())): Equal(fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(instance.Spec.Etcd.Quota.Value()))), - fmt.Sprintf("%s=%s", "--delta-snapshot-period", instance.Spec.Backup.DeltaSnapshotPeriod.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--delta-snapshot-period", instance.Spec.Backup.DeltaSnapshotPeriod.Duration.String())), - fmt.Sprintf("%s=%s", "--garbage-collection-period", instance.Spec.Backup.GarbageCollectionPeriod.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--garbage-collection-period", instance.Spec.Backup.GarbageCollectionPeriod.Duration.String())), - fmt.Sprintf("%s=%s", "--auto-compaction-mode", *instance.Spec.Common.AutoCompactionMode): Equal(fmt.Sprintf("%s=%s", "--auto-compaction-mode", autoCompactionMode)), - fmt.Sprintf("%s=%s", "--auto-compaction-retention", *instance.Spec.Common.AutoCompactionRetention): Equal(fmt.Sprintf("%s=%s", "--auto-compaction-retention", autoCompactionRetention)), - fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", instance.Spec.Backup.EtcdSnapshotTimeout.Duration.String())), - fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String()): Equal(fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", instance.Spec.Etcd.EtcdDefragTimeout.Duration.String())), + "--snapstore-temp-directory=/var/etcd/data/temp": Equal("--snapstore-temp-directory=/var/etcd/data/temp"), + fmt.Sprintf("--delta-snapshot-memory-limit=%d", deltaSnapShotMemLimit.Value()): Equal(fmt.Sprintf("--delta-snapshot-memory-limit=%d", deltaSnapShotMemLimit.Value())), + fmt.Sprintf("--garbage-collection-policy=%s", druidv1alpha1.GarbageCollectionPolicyLimitBased): Equal(fmt.Sprintf("--garbage-collection-policy=%s", druidv1alpha1.GarbageCollectionPolicyLimitBased)), + fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort): Equal(fmt.Sprintf("--endpoints=http://%s-local:%d", instance.Name, clientPort)), + fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(quota.Value())): Equal(fmt.Sprintf("--embedded-etcd-quota-bytes=%d", int64(quota.Value()))), + fmt.Sprintf("--max-backups=%d", maxBackups): Equal(fmt.Sprintf("--max-backups=%d", maxBackups)), + fmt.Sprintf("--auto-compaction-mode=%s", 
druidv1alpha1.Periodic): Equal(fmt.Sprintf("--auto-compaction-mode=%s", druidv1alpha1.Periodic)), + fmt.Sprintf("--auto-compaction-retention=%s", DefaultAutoCompactionRetention): Equal(fmt.Sprintf("--auto-compaction-retention=%s", DefaultAutoCompactionRetention)), + fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", "8m"): Equal(fmt.Sprintf("%s=%s", "--etcd-snapshot-timeout", "8m")), + fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", "8m"): Equal(fmt.Sprintf("%s=%s", "--etcd-defrag-timeout", "8m")), }), "Ports": ConsistOf([]corev1.ContainerPort{ corev1.ContainerPort{ @@ -1878,34 +1553,22 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi ContainerPort: backupPort, }, }), - "Image": Equal(*instance.Spec.Backup.Image), + "Image": Equal(fmt.Sprintf("%s:%s", images[common.BackupRestore].Repository, *images[common.BackupRestore].Tag)), "ImagePullPolicy": Equal(corev1.PullIfNotPresent), - "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ - *instance.Spec.VolumeClaimTemplate: MatchFields(IgnoreExtras, Fields{ - "Name": Equal(*instance.Spec.VolumeClaimTemplate), + "VolumeMounts": MatchAllElements(volumeMountIterator, Elements{ + instance.Name: MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Name), "MountPath": Equal("/var/etcd/data"), }), "etcd-config-file": MatchFields(IgnoreExtras, Fields{ "Name": Equal("etcd-config-file"), "MountPath": Equal("/var/etcd/config/"), }), - "ca-etcd": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("ca-etcd"), - "MountPath": Equal("/var/etcd/ssl/ca"), - }), - "etcd-server-tls": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-server-tls"), - "MountPath": Equal("/var/etcd/ssl/server"), - }), - "etcd-client-tls": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-client-tls"), - "MountPath": Equal("/var/etcd/ssl/client"), - }), }), - "Env": MatchElements(envIterator, IgnoreExtras, Elements{ + "Env": MatchAllElements(envIterator, Elements{ "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ "Name": Equal("STORAGE_CONTAINER"), - "Value": Equal(*instance.Spec.Backup.Store.Container), + "Value": Equal(""), }), }), }), @@ -1928,46 +1591,187 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi })), }), }), - "etcd-server-tls": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-server-tls"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretName": Equal(instance.Spec.Etcd.TLS.ServerTLSSecretRef.Name), - })), + }), + }), + }), + "VolumeClaimTemplates": MatchAllElements(pvcIterator, Elements{ + instance.Name: MatchFields(IgnoreExtras, Fields{ + "ObjectMeta": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Name), + }), + "Spec": MatchFields(IgnoreExtras, Fields{ + "AccessModes": MatchAllElements(accessModeIterator, Elements{ + "ReadWriteOnce": Equal(corev1.ReadWriteOnce), + }), + "Resources": MatchFields(IgnoreExtras, Fields{ + "Requests": MatchKeys(IgnoreExtras, Keys{ + corev1.ResourceStorage: Equal(defaultStorageCapacity), }), }), - "etcd-client-tls": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-client-tls"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretName": Equal(instance.Spec.Etcd.TLS.ClientTLSSecretRef.Name), - })), + }), + }), + }), + }), + })) + +} + +func validateStoreGCPForCmpctJob(instance *druidv1alpha1.Etcd, j *batchv1.Job) { + Expect(*j).To(MatchFields(IgnoreExtras, Fields{ + "Spec": 
MatchFields(IgnoreExtras, Fields{ + "Template": MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ + "compact-backup": MatchFields(IgnoreExtras, Fields{ + "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ + "--storage-provider=GCS": Equal("--storage-provider=GCS"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), + }), + "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ + "etcd-backup": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-backup"), + "MountPath": Equal("/root/.gcp/"), + }), + }), + "Env": MatchAllElements(envIterator, Elements{ + "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_CONTAINER"), + "Value": Equal(*instance.Spec.Backup.Store.Container), + }), + "GOOGLE_APPLICATION_CREDENTIALS": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("GOOGLE_APPLICATION_CREDENTIALS"), + "Value": Equal("/root/.gcp/serviceaccount.json"), + }), }), }), - "ca-etcd": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("ca-etcd"), + }), + "Volumes": MatchElements(volumeIterator, IgnoreExtras, Elements{ + "etcd-backup": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("etcd-backup"), "VolumeSource": MatchFields(IgnoreExtras, Fields{ "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretName": Equal(instance.Spec.Etcd.TLS.TLSCASecretRef.Name), + "SecretName": Equal(instance.Spec.Backup.Store.SecretRef.Name), })), }), }), }), }), }), - "VolumeClaimTemplates": MatchAllElements(pvcIterator, Elements{ - *instance.Spec.VolumeClaimTemplate: MatchFields(IgnoreExtras, Fields{ - "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": Equal(*instance.Spec.VolumeClaimTemplate), - }), - "Spec": MatchFields(IgnoreExtras, Fields{ - "StorageClassName": PointTo(Equal(*instance.Spec.StorageClass)), - "AccessModes": MatchAllElements(accessModeIterator, Elements{ - "ReadWriteOnce": Equal(corev1.ReadWriteOnce), + }), + })) +} +func validateStoreAWSForETCDApp(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { + Expect(*s).To(MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Template": MatchFields(IgnoreExtras, Fields{ + //s.Spec.Template.Spec.HostAliases + "Spec": MatchFields(IgnoreExtras, Fields{ + "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ + + backupRestore: MatchFields(IgnoreExtras, Fields{ + "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ + "--storage-provider=S3": Equal("--storage-provider=S3"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + }), + "ImagePullPolicy": Equal(corev1.PullIfNotPresent), + "Env": MatchAllElements(envIterator, Elements{ + "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_CONTAINER"), + "Value": Equal(*instance.Spec.Backup.Store.Container), + }), + "AWS_REGION": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("AWS_REGION"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": 
MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("region"), + })), + })), + }), + "AWS_SECRET_ACCESS_KEY": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("AWS_SECRET_ACCESS_KEY"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("secretAccessKey"), + })), + })), + }), + "AWS_ACCESS_KEY_ID": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("AWS_ACCESS_KEY_ID"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("accessKeyID"), + })), + })), + }), + }), }), - "Resources": MatchFields(IgnoreExtras, Fields{ - "Requests": MatchKeys(IgnoreExtras, Keys{ - corev1.ResourceStorage: Equal(*instance.Spec.StorageCapacity), + }), + }), + }), + }), + })) +} + +func validateStoreAWSForCmpctJob(instance *druidv1alpha1.Etcd, j *batchv1.Job) { + Expect(*j).To(MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Template": MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ + "compact-backup": MatchFields(IgnoreExtras, Fields{ + "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ + "--storage-provider=S3": Equal("--storage-provider=S3"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), + }), + "Env": MatchAllElements(envIterator, Elements{ + "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_CONTAINER"), + "Value": Equal(*instance.Spec.Backup.Store.Container), + }), + "AWS_REGION": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("AWS_REGION"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("region"), + })), + })), + }), + "AWS_SECRET_ACCESS_KEY": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("AWS_SECRET_ACCESS_KEY"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("secretAccessKey"), + })), + })), + }), + "AWS_ACCESS_KEY_ID": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("AWS_ACCESS_KEY_ID"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("accessKeyID"), + })), + })), + }), }), }), }), @@ -1977,8 +1781,7 @@ func validateEtcd(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Servi })) } -func validateStoreGCP(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc 
*corev1.Service, instance *druidv1alpha1.Etcd) { - +func validateStoreAzureForETCDApp(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { Expect(*s).To(MatchFields(IgnoreExtras, Fields{ "Spec": MatchFields(IgnoreExtras, Fields{ "Template": MatchFields(IgnoreExtras, Fields{ @@ -1987,34 +1790,36 @@ func validateStoreGCP(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.S "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ backupRestore: MatchFields(IgnoreExtras, Fields{ "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=GCS": Equal("--storage-provider=GCS"), + "--storage-provider=ABS": Equal("--storage-provider=ABS"), fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), }), - "VolumeMounts": MatchElements(volumeMountIterator, IgnoreExtras, Elements{ - "etcd-backup": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-backup"), - "MountPath": Equal("/root/.gcp/"), - }), - }), "Env": MatchAllElements(envIterator, Elements{ "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ "Name": Equal("STORAGE_CONTAINER"), "Value": Equal(*instance.Spec.Backup.Store.Container), }), - "GOOGLE_APPLICATION_CREDENTIALS": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("GOOGLE_APPLICATION_CREDENTIALS"), - "Value": Equal("/root/.gcp/serviceaccount.json"), + "STORAGE_ACCOUNT": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_ACCOUNT"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("storageAccount"), + })), + })), + }), + "STORAGE_KEY": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_KEY"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("storageKey"), + })), + })), }), - }), - }), - }), - "Volumes": MatchElements(volumeIterator, IgnoreExtras, Elements{ - "etcd-backup": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("etcd-backup"), - "VolumeSource": MatchFields(IgnoreExtras, Fields{ - "Secret": PointTo(MatchFields(IgnoreExtras, Fields{ - "SecretName": Equal(instance.Spec.Backup.Store.SecretRef.Name), - })), }), }), }), @@ -2022,20 +1827,19 @@ func validateStoreGCP(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.S }), }), })) - } -func validateStoreAzure(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, instance *druidv1alpha1.Etcd) { - Expect(*s).To(MatchFields(IgnoreExtras, Fields{ +func validateStoreAzureForCmpctJob(instance *druidv1alpha1.Etcd, j *batchv1.Job) { + Expect(*j).To(MatchFields(IgnoreExtras, Fields{ "Spec": MatchFields(IgnoreExtras, Fields{ "Template": MatchFields(IgnoreExtras, Fields{ - //s.Spec.Template.Spec.HostAliases "Spec": MatchFields(IgnoreExtras, Fields{ "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - backupRestore: MatchFields(IgnoreExtras, Fields{ + "compact-backup": MatchFields(IgnoreExtras, Fields{ "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ "--storage-provider=ABS": Equal("--storage-provider=ABS"), - fmt.Sprintf("%s=%s", "--store-prefix", 
instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), }), "Env": MatchAllElements(envIterator, Elements{ "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ @@ -2073,7 +1877,7 @@ func validateStoreAzure(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1 })) } -func validateStoreOpenstack(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, instance *druidv1alpha1.Etcd) { +func validateStoreOpenstackForETCDApp(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { Expect(*s).To(MatchFields(IgnoreExtras, Fields{ "Spec": MatchFields(IgnoreExtras, Fields{ "Template": MatchFields(IgnoreExtras, Fields{ @@ -2154,7 +1958,88 @@ func validateStoreOpenstack(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *co })) } -func validateStoreAlicloud(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, instance *druidv1alpha1.Etcd) { +func validateStoreOpenstackForCmpctJob(instance *druidv1alpha1.Etcd, j *batchv1.Job) { + Expect(*j).To(MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Template": MatchFields(IgnoreExtras, Fields{ + "Spec": MatchFields(IgnoreExtras, Fields{ + "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ + "compact-backup": MatchFields(IgnoreExtras, Fields{ + "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ + "--storage-provider=Swift": Equal("--storage-provider=Swift"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), + }), + "Env": MatchAllElements(envIterator, Elements{ + "STORAGE_CONTAINER": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("STORAGE_CONTAINER"), + "Value": Equal(*instance.Spec.Backup.Store.Container), + }), + "OS_AUTH_URL": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("OS_AUTH_URL"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("authURL"), + })), + })), + }), + "OS_USERNAME": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("OS_USERNAME"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("username"), + })), + })), + }), + "OS_TENANT_NAME": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("OS_TENANT_NAME"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("tenantName"), + })), + })), + }), + "OS_PASSWORD": MatchFields(IgnoreExtras, 
Fields{ + "Name": Equal("OS_PASSWORD"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("password"), + })), + })), + }), + "OS_DOMAIN_NAME": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("OS_DOMAIN_NAME"), + "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ + "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ + "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ + "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), + }), + "Key": Equal("domainName"), + })), + })), + }), + }), + }), + }), + }), + }), + }), + })) +} + +func validateStoreAlicloudForETCDApp(instance *druidv1alpha1.Etcd, s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service) { Expect(*s).To(MatchFields(IgnoreExtras, Fields{ "Spec": MatchFields(IgnoreExtras, Fields{ "Template": MatchFields(IgnoreExtras, Fields{ @@ -2215,18 +2100,17 @@ func validateStoreAlicloud(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *cor })) } -func validateStoreAWS(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.Service, instance *druidv1alpha1.Etcd) { - Expect(*s).To(MatchFields(IgnoreExtras, Fields{ +func validateStoreAlicloudForCmpctJob(instance *druidv1alpha1.Etcd, j *batchv1.Job) { + Expect(*j).To(MatchFields(IgnoreExtras, Fields{ "Spec": MatchFields(IgnoreExtras, Fields{ "Template": MatchFields(IgnoreExtras, Fields{ - //s.Spec.Template.Spec.HostAliases "Spec": MatchFields(IgnoreExtras, Fields{ "Containers": MatchElements(containerIterator, IgnoreExtras, Elements{ - - backupRestore: MatchFields(IgnoreExtras, Fields{ + "compact-backup": MatchFields(IgnoreExtras, Fields{ "Command": MatchElements(cmdIterator, IgnoreExtras, Elements{ - "--storage-provider=S3": Equal("--storage-provider=S3"), - fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + "--storage-provider=OSS": Equal("--storage-provider=OSS"), + fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix): Equal(fmt.Sprintf("%s=%s", "--store-prefix", instance.Spec.Backup.Store.Prefix)), + fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container): Equal(fmt.Sprintf("%s=%s", "--store-container", *instance.Spec.Backup.Store.Container)), }), "ImagePullPolicy": Equal(corev1.PullIfNotPresent), "Env": MatchAllElements(envIterator, Elements{ @@ -2234,30 +2118,30 @@ func validateStoreAWS(s *appsv1.StatefulSet, cm *corev1.ConfigMap, svc *corev1.S "Name": Equal("STORAGE_CONTAINER"), "Value": Equal(*instance.Spec.Backup.Store.Container), }), - "AWS_REGION": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("AWS_REGION"), + "ALICLOUD_ENDPOINT": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("ALICLOUD_ENDPOINT"), "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), }), - "Key": Equal("region"), + "Key": Equal("storageEndpoint"), })), })), }), - "AWS_SECRET_ACCESS_KEY": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("AWS_SECRET_ACCESS_KEY"), + "ALICLOUD_ACCESS_KEY_SECRET": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("ALICLOUD_ACCESS_KEY_SECRET"), "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ "SecretKeyRef": 
PointTo(MatchFields(IgnoreExtras, Fields{ "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ "Name": Equal(instance.Spec.Backup.Store.SecretRef.Name), }), - "Key": Equal("secretAccessKey"), + "Key": Equal("accessKeySecret"), })), })), }), - "AWS_ACCESS_KEY_ID": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("AWS_ACCESS_KEY_ID"), + "ALICLOUD_ACCESS_KEY_ID": MatchFields(IgnoreExtras, Fields{ + "Name": Equal("ALICLOUD_ACCESS_KEY_ID"), "ValueFrom": PointTo(MatchFields(IgnoreExtras, Fields{ "SecretKeyRef": PointTo(MatchFields(IgnoreExtras, Fields{ "LocalObjectReference": MatchFields(IgnoreExtras, Fields{ @@ -2352,15 +2236,43 @@ func statefulSetRemoved(c client.Client, ss *appsv1.StatefulSet) error { return fmt.Errorf("statefulset not removed") } -func cronJobRemoved(c client.Client, cj *batchv1.CronJob) error { +/*func createJob(name, namespace string, labels map[string]string) *batchv1.Job { + j := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: v1.JobSpec{ + BackoffLimit: pointer.Int32Ptr(0), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: "Never", + Containers: []corev1.Container{ + { + Name: "compact-backup", + Image: "eu.gcr.io/gardener-project/gardener/etcdbrctl:v0.12.0", + }, + }, + }, + }, + }, + } + return &j +} + +func jobRemoved(c client.Client, j *batchv1.Job) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() - cronjob := &batchv1.CronJob{} + job := &batchv1.Job{} req := types.NamespacedName{ - Name: cj.Name, - Namespace: cj.Namespace, + Name: j.Name, + Namespace: j.Namespace, } - if err := c.Get(ctx, req, cronjob); err != nil { + if err := c.Get(ctx, req, job); err != nil { if errors.IsNotFound(err) { // Object not found, return. Created objects are automatically garbage collected. 
// For additional cleanup logic use finalizers @@ -2370,6 +2282,7 @@ func cronJobRemoved(c client.Client, cj *batchv1.CronJob) error { } return fmt.Errorf("statefulset not removed") } +*/ func statefulsetIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.Etcd, ss *appsv1.StatefulSet) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) @@ -2386,7 +2299,7 @@ func statefulsetIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.E return fmt.Errorf("no annotations") } if checkEtcdOwnerReference(ss.GetOwnerReferences(), instance) { - return fmt.Errorf("ownerReference exists") + return fmt.Errorf("ownerReference exists for statefulset") } return nil } @@ -2404,7 +2317,7 @@ func configMapIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.Etc } if !checkEtcdOwnerReference(cm.GetOwnerReferences(), instance) { - return fmt.Errorf("ownerReference does not exists") + return fmt.Errorf("ownerReference does not exists for configmap") } return nil } @@ -2422,56 +2335,41 @@ func serviceIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.Etcd, } if !checkEtcdOwnerReference(svc.GetOwnerReferences(), instance) { - return fmt.Errorf("ownerReference does not exists") + return fmt.Errorf("ownerReference does not exists for service") } return nil } -func cronJobIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.Etcd, cj *batchv1.CronJob) error { +func deltaLeaseIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.Etcd, lease *coordinationv1.Lease) error { ctx, cancel := context.WithTimeout(context.TODO(), timeout) defer cancel() req := types.NamespacedName{ - Name: getCronJobName(instance), + Name: getDeltaLease(instance), Namespace: instance.Namespace, } - if err := c.Get(ctx, req, cj); err != nil { + if err := c.Get(ctx, req, lease); err != nil { return err } + + if !checkEtcdOwnerReference(lease.GetOwnerReferences(), instance) { + return fmt.Errorf("ownerReference does not exists for lease") + } return nil } -func createCronJob(name, namespace string, labels map[string]string) *batchv1.CronJob { - cj := batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - }, - Spec: batchv1.CronJobSpec{ - Schedule: backupCompactionSchedule, - ConcurrencyPolicy: "Forbid", - JobTemplate: batchv1.JobTemplateSpec{ - Spec: v1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - RestartPolicy: "Never", - Containers: []corev1.Container{ - { - Name: "compact-backup", - Image: "eu.gcr.io/gardener-project/gardener/etcdbrctl:v0.12.0", - }, - }, - }, - }, - }, - }, - }, +func jobIsCorrectlyReconciled(c client.Client, instance *druidv1alpha1.Etcd, j *batchv1.Job) error { + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + req := types.NamespacedName{ + Name: getJobName(instance), + Namespace: instance.Namespace, + } + + if err := c.Get(ctx, req, j); err != nil { + return err } - return &cj + return nil } func createStatefulset(name, namespace string, labels map[string]string) *appsv1.StatefulSet { @@ -2561,44 +2459,8 @@ func createPod(name, namespace string, labels map[string]string) *corev1.Pod { return &pod } -func getEtcdWithCmpctScheduleTLS(name, namespace string) *druidv1alpha1.Etcd { - etcd := getEtcdWithTLS(name, namespace) - etcd.Spec.Backup.BackupCompactionSchedule = &backupCompactionSchedule - return etcd -} - -func getEtcdWithCmpctScheduleGCS(name, namespace string) *druidv1alpha1.Etcd { - 
etcd := getEtcdWithGCS(name, namespace) - etcd.Spec.Backup.BackupCompactionSchedule = &backupCompactionSchedule - return etcd -} - -func getEtcdWithCmpctScheduleS3(name, namespace string) *druidv1alpha1.Etcd { - etcd := getEtcdWithS3(name, namespace) - etcd.Spec.Backup.BackupCompactionSchedule = &backupCompactionSchedule - return etcd -} - -func getEtcdWithCmpctScheduleABS(name, namespace string) *druidv1alpha1.Etcd { - etcd := getEtcdWithABS(name, namespace) - etcd.Spec.Backup.BackupCompactionSchedule = &backupCompactionSchedule - return etcd -} - -func getEtcdWithCmpctScheduleSwift(name, namespace string) *druidv1alpha1.Etcd { - etcd := getEtcdWithSwift(name, namespace) - etcd.Spec.Backup.BackupCompactionSchedule = &backupCompactionSchedule - return etcd -} - -func getEtcdWithCmpctScheduleOSS(name, namespace string) *druidv1alpha1.Etcd { - etcd := getEtcdWithOSS(name, namespace) - etcd.Spec.Backup.BackupCompactionSchedule = &backupCompactionSchedule - return etcd -} - func getEtcdWithGCS(name, namespace string) *druidv1alpha1.Etcd { - provider := druidv1alpha1.StorageProvider("gcp") + provider := druidv1alpha1.StorageProvider("GCS") etcd := getEtcdWithTLS(name, namespace) etcd.Spec.Backup.Store = &druidv1alpha1.StoreSpec{ Container: &container, @@ -2612,7 +2474,7 @@ func getEtcdWithGCS(name, namespace string) *druidv1alpha1.Etcd { } func getEtcdWithABS(name, namespace string) *druidv1alpha1.Etcd { - provider := druidv1alpha1.StorageProvider("azure") + provider := druidv1alpha1.StorageProvider("ABS") etcd := getEtcdWithTLS(name, namespace) etcd.Spec.Backup.Store = &druidv1alpha1.StoreSpec{ Container: &container, @@ -2626,7 +2488,7 @@ func getEtcdWithABS(name, namespace string) *druidv1alpha1.Etcd { } func getEtcdWithS3(name, namespace string) *druidv1alpha1.Etcd { - provider := druidv1alpha1.StorageProvider("aws") + provider := druidv1alpha1.StorageProvider("S3") etcd := getEtcdWithTLS(name, namespace) etcd.Spec.Backup.Store = &druidv1alpha1.StoreSpec{ Container: &container, @@ -2640,7 +2502,7 @@ func getEtcdWithS3(name, namespace string) *druidv1alpha1.Etcd { } func getEtcdWithSwift(name, namespace string) *druidv1alpha1.Etcd { - provider := druidv1alpha1.StorageProvider("openstack") + provider := druidv1alpha1.StorageProvider("Swift") etcd := getEtcdWithTLS(name, namespace) etcd.Spec.Backup.Store = &druidv1alpha1.StoreSpec{ Container: &container, @@ -2655,7 +2517,7 @@ func getEtcdWithSwift(name, namespace string) *druidv1alpha1.Etcd { func getEtcdWithOSS(name, namespace string) *druidv1alpha1.Etcd { container := fmt.Sprintf("%s-container", name) - provider := druidv1alpha1.StorageProvider("alicloud") + provider := druidv1alpha1.StorageProvider("OSS") etcd := getEtcdWithTLS(name, namespace) etcd.Spec.Backup.Store = &druidv1alpha1.StoreSpec{ Container: &container, @@ -2751,6 +2613,16 @@ func getEtcd(name, namespace string, tlsEnabled bool) *druidv1alpha1.Etcd { "memory": parseQuantity("128Mi"), }, }, + CompactionResources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": parseQuantity("500m"), + "memory": parseQuantity("3Gi"), + }, + Limits: corev1.ResourceList{ + "cpu": parseQuantity("700m"), + "memory": parseQuantity("4Gi"), + }, + }, Store: &druidv1alpha1.StoreSpec{ SecretRef: &corev1.SecretReference{ Name: "etcd-backup", diff --git a/controllers/lease_controller.go b/controllers/lease_controller.go new file mode 100644 index 000000000..35418c016 --- /dev/null +++ b/controllers/lease_controller.go @@ -0,0 +1,456 @@ +// Copyright (c) 2021 SAP SE or an SAP 
affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/go-logr/logr" + batchv1 "k8s.io/api/batch/v1" + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + errorsutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + + druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" + controllersconfig "github.com/gardener/etcd-druid/controllers/config" + kutil "github.com/gardener/gardener/pkg/utils/kubernetes" +) + +// LeaseController reconciles compaction job +type LeaseController struct { + client.Client + Scheme *runtime.Scheme + logger logr.Logger + config controllersconfig.CompactionConfig +} + +// NewLeaseController creates a new LeaseController object +func NewLeaseController(mgr manager.Manager, config controllersconfig.CompactionConfig) *LeaseController { + return &LeaseController{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + logger: log.Log.WithName("lease-controller"), + config: config, + } +} + +// +kubebuilder:rbac:groups=druid.gardener.cloud,resources=etcds,verbs=get;list;watch +// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;create;list;watch;update;patch;delete + +// Reconcile reconciles the compaction job. +func (lc *LeaseController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + lc.logger.Info("Lease controller reconciliation started") + etcd := &druidv1alpha1.Etcd{} + if err := lc.Get(ctx, req.NamespacedName, etcd); err != nil { + if errors.IsNotFound(err) { + // Object not found, return. Created objects are automatically garbage collected. + // For additional cleanup logic use finalizers. + return ctrl.Result{ + RequeueAfter: 10 * time.Second, + }, err + } + // Error reading the object - requeue the request. 
+ return ctrl.Result{
+ RequeueAfter: 10 * time.Second,
+ }, err
+ }
+
+ logger := lc.logger.WithValues("etcd", kutil.Key(etcd.Namespace, etcd.Name).String())
+
+ // Fetch the delta snapshot revisions Lease object that keeps track of the delta snapshot revisions based on which
+ // the compaction job will be scheduled
+ nsName := types.NamespacedName{
+ Name: getDeltaLease(etcd),
+ Namespace: etcd.Namespace,
+ }
+
+ deltaLease := &coordinationv1.Lease{}
+ err := lc.Get(ctx, nsName, deltaLease)
+ if err != nil {
+ logger.Info("Couldn't fetch delta snap lease because: " + err.Error())
+
+ return ctrl.Result{
+ RequeueAfter: 10 * time.Second,
+ }, err
+ }
+
+ // Run compaction job
+ if etcd.Spec.Backup.Store != nil {
+ hi, err := strconv.ParseInt(*deltaLease.Spec.HolderIdentity, 10, 32)
+ if err != nil {
+ logger.Error(err, "Can't convert holder identity of revision lease to integer")
+ return ctrl.Result{
+ RequeueAfter: 10 * time.Second,
+ }, err
+ }
+ // Reconcile the compaction job only when the current revision is more than 1 million events (this value is configurable through the `events-threshold` option in druid) ahead of
+ // the last snapshot (full/compact) revision
+ if hi >= lc.config.EventsThreshold {
+ j, err := lc.reconcileJob(ctx, logger, etcd)
+ if err != nil {
+ return ctrl.Result{
+ RequeueAfter: 5 * time.Second,
+ }, err
+ }
+ logger.Info("Current compaction job is: " + j.Name)
+ }
+ }
+
+ return ctrl.Result{
+ Requeue: false,
+ }, nil
+}
+
+func (lc *LeaseController) reconcileJob(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd) (*batchv1.Job, error) {
+ logger.Info("Reconcile etcd compaction job")
+
+ // First check if a job is already running
+ job := &batchv1.Job{}
+ err := lc.Get(ctx, types.NamespacedName{Name: getJobName(etcd), Namespace: etcd.Namespace}, job)
+
+ if err != nil {
+ if errors.IsNotFound(err) {
+ // Required job doesn't exist. Create a new one
+ job, err = lc.CreateCompactJob(ctx, logger, etcd)
+ if err != nil {
+ logger.Error(err, "error during compaction job creation")
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+
+ // Delete the job and requeue if the job failed
+ if job.Status.Failed > 0 {
+ err = lc.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationForeground))
+ if err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("job status failed")
+ }
+
+ // Delete the job and return if the job succeeded
+ if job.Status.Succeeded > 0 {
+ err = lc.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationForeground))
+ if err != nil {
+ return nil, err
+ }
+ return job, nil
+ }
+
+ // Delete the job and requeue if the job doesn't carry the label for this etcd instance
+ if val, ok := job.Labels["instance"]; ok {
+ if val != etcd.Name {
+ err = lc.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationForeground))
+ if err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("job doesn't have label for this etcd instance")
+ }
+ }
+
+ return job, nil
+}
+
+func (lc *LeaseController) CreateCompactJob(ctx context.Context, logger logr.Logger, etcd *druidv1alpha1.Etcd) (*batchv1.Job, error) {
+ /*resourceValues := backupValues["compactionResources"].(map[string]interface{})
+ requestValues := resourceValues["requests"].(map[string]interface{})
+ limitValues := resourceValues["limits"].(map[string]interface{})*/
+
+ activeDeadlineSeconds, err := time.ParseDuration(lc.config.ActiveDeadlineDuration)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse activeDeadlineDuration string from custodian config: %v", err)
+ }
+
+ job := &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: getJobName(etcd),
+ Namespace: etcd.Namespace,
+ Labels: getLabels(etcd),
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: "druid.gardener.cloud/v1alpha1",
+ BlockOwnerDeletion: pointer.BoolPtr(true),
+ Controller: pointer.BoolPtr(true),
+ Kind: "Etcd",
+ Name: etcd.Name,
+ UID: etcd.UID,
+ },
+ },
+ },
+
+ Spec: batchv1.JobSpec{
+ ActiveDeadlineSeconds: pointer.Int64Ptr(int64(activeDeadlineSeconds.Seconds())),
+ BackoffLimit: pointer.Int32Ptr(0),
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: etcd.Spec.Annotations,
+ Labels: getLabels(etcd),
+ },
+ Spec: v1.PodSpec{
+ //ServiceAccountName: fmt.Sprint(values["serviceAccountName"]),
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{{
+ Name: "compact-backup",
+ Image: *etcd.Spec.Backup.Image,
+ ImagePullPolicy: v1.PullIfNotPresent,
+ Command: getCompactJobCommands(etcd),
+ VolumeMounts: getCmpctJobVolumeMounts(etcd),
+ Env: getCmpctJobEnvVar(etcd),
+ }},
+ Volumes: getCmpctJobVolumes(etcd),
+ },
+ },
+ },
+ }
+
+ if etcd.Spec.Backup.CompactionResources != nil {
+ job.Spec.Template.Spec.Containers[0].Resources = *etcd.Spec.Backup.CompactionResources
+ }
+
+ logger.Info("Creating job", "job", kutil.Key(job.Namespace, job.Name).String())
+ err = lc.Create(ctx, job)
+
+ // Ignore the precondition violated error; the job already exists
+ // in the desired state. 
+ if err == errorsutil.ErrPreconditionViolated { + logger.Info("Job precondition doesn't hold, skip updating it", "job", kutil.Key(job.Namespace, job.Name).String()) + err = nil + } + if err != nil { + return nil, err + } + + //TODO (abdasgupta): Evaluate necessity of claiming object here after creation + return job, nil +} + +func getJobName(etcd *druidv1alpha1.Etcd) string { + return fmt.Sprintf("%s-compact-backup", string(etcd.UID[:6])) +} + +func getLabels(etcd *druidv1alpha1.Etcd) map[string]string { + return map[string]string{ + "name": "etcd", + "instance": etcd.Name, + } +} +func getCmpctJobVolumeMounts(etcd *druidv1alpha1.Etcd) []v1.VolumeMount { + vms := []v1.VolumeMount{ + { + Name: "etcd-workspace-dir", + MountPath: "/var/etcd/data", + }, + } + + if etcd.Spec.Backup.Store == nil { + return vms + } + + storeValues := etcd.Spec.Backup.Store + + if *storeValues.Provider == "GCS" { + vms = append(vms, v1.VolumeMount{ + Name: "etcd-backup", + MountPath: "/root/.gcp/", + }) + } + + return vms +} + +func getCmpctJobVolumes(etcd *druidv1alpha1.Etcd) []v1.Volume { + vs := []v1.Volume{ + { + Name: "etcd-workspace-dir", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + } + + if etcd.Spec.Backup.Store == nil { + return vs + } + + storeValues := etcd.Spec.Backup.Store + + if *storeValues.Provider == "GCS" { + vs = append(vs, v1.Volume{ + Name: "etcd-backup", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: storeValues.SecretRef.Name, + }, + }, + }) + } + + return vs +} + +func getCmpctJobEnvVar(etcd *druidv1alpha1.Etcd) []v1.EnvVar { + var env []v1.EnvVar + if etcd.Spec.Backup.Store == nil { + return env + } + + storeValues := etcd.Spec.Backup.Store + + env = append(env, getEnvVarFromValues("STORAGE_CONTAINER", *storeValues.Container)) + //env = append(env, getEnvVarFromFields("POD_NAMESPACE", "metadata.namespace")) + + if *storeValues.Provider == "S3" { + env = append(env, getEnvVarFromSecrets("AWS_REGION", storeValues.SecretRef.Name, "region")) + env = append(env, getEnvVarFromSecrets("AWS_SECRET_ACCESS_KEY", storeValues.SecretRef.Name, "secretAccessKey")) + env = append(env, getEnvVarFromSecrets("AWS_ACCESS_KEY_ID", storeValues.SecretRef.Name, "accessKeyID")) + } + + if *storeValues.Provider == "ABS" { + env = append(env, getEnvVarFromSecrets("STORAGE_ACCOUNT", storeValues.SecretRef.Name, "storageAccount")) + env = append(env, getEnvVarFromSecrets("STORAGE_KEY", storeValues.SecretRef.Name, "storageKey")) + } + + if *storeValues.Provider == "GCS" { + env = append(env, getEnvVarFromValues("GOOGLE_APPLICATION_CREDENTIALS", "/root/.gcp/serviceaccount.json")) + } + + if *storeValues.Provider == "Swift" { + env = append(env, getEnvVarFromSecrets("OS_AUTH_URL", storeValues.SecretRef.Name, "authURL")) + env = append(env, getEnvVarFromSecrets("OS_DOMAIN_NAME", storeValues.SecretRef.Name, "domainName")) + env = append(env, getEnvVarFromSecrets("OS_USERNAME", storeValues.SecretRef.Name, "username")) + env = append(env, getEnvVarFromSecrets("OS_PASSWORD", storeValues.SecretRef.Name, "password")) + env = append(env, getEnvVarFromSecrets("OS_TENANT_NAME", storeValues.SecretRef.Name, "tenantName")) + } + + if *storeValues.Provider == "OSS" { + env = append(env, getEnvVarFromSecrets("ALICLOUD_ENDPOINT", storeValues.SecretRef.Name, "storageEndpoint")) + env = append(env, getEnvVarFromSecrets("ALICLOUD_ACCESS_KEY_SECRET", storeValues.SecretRef.Name, "accessKeySecret")) + env = append(env, getEnvVarFromSecrets("ALICLOUD_ACCESS_KEY_ID", 
storeValues.SecretRef.Name, "accessKeyID")) + } + + if *storeValues.Provider == "ECS" { + env = append(env, getEnvVarFromSecrets("ECS_ENDPOINT", storeValues.SecretRef.Name, "endpoint")) + env = append(env, getEnvVarFromSecrets("ECS_ACCESS_KEY_ID", storeValues.SecretRef.Name, "accessKeyID")) + env = append(env, getEnvVarFromSecrets("ECS_SECRET_ACCESS_KEY", storeValues.SecretRef.Name, "secretAccessKey")) + } + + if *storeValues.Provider == "OCS" { + env = append(env, getEnvVarFromSecrets("OCS_ENDPOINT", storeValues.SecretRef.Name, "endpoint")) + env = append(env, getEnvVarFromSecrets("OCS_ACCESS_KEY_ID", storeValues.SecretRef.Name, "accessKeyID")) + env = append(env, getEnvVarFromSecrets("OCS_SECRET_ACCESS_KEY", storeValues.SecretRef.Name, "secretAccessKey")) + env = append(env, getEnvVarFromSecrets("OCS_REGION", storeValues.SecretRef.Name, "region")) + env = append(env, getEnvVarFromSecrets("OCS_DISABLE_SSL", storeValues.SecretRef.Name, "disableSSL")) + env = append(env, getEnvVarFromSecrets("OCS_INSECURE_SKIP_VERIFY", storeValues.SecretRef.Name, "insecureSkipVerify")) + } + + return env +} + +func getEnvVarFromValues(name, value string) v1.EnvVar { + return v1.EnvVar{ + Name: name, + Value: value, + } +} + +/*func getEnvVarFromFields(name, fieldPath string) v1.EnvVar { + return v1.EnvVar{ + Name: name, + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: fieldPath, + }, + }, + } +}*/ + +func getEnvVarFromSecrets(name, secretName, secretKey string) v1.EnvVar { + return v1.EnvVar{ + Name: name, + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: secretName, + }, + Key: secretKey, + }, + }, + } +} + +func getCompactJobCommands(etcd *druidv1alpha1.Etcd) []string { + command := []string{"" + "etcdbrctl" + " " + "compact"} + command = append(command, "--data-dir=/var/etcd/data") + command = append(command, "--snapstore-temp-directory=/var/etcd/data/tmp") + + var quota int64 = 8 * 1024 * 1024 * 1024 // 8Gi + if etcd.Spec.Etcd.Quota != nil { + quota = etcd.Spec.Etcd.Quota.Value() + } + command = append(command, "--embedded-etcd-quota-bytes="+fmt.Sprint(quota)) + + if etcd.Spec.Etcd.EtcdDefragTimeout != nil { + command = append(command, "--etcd-defrag-timeout="+etcd.Spec.Etcd.EtcdDefragTimeout.Duration.String()) + } + + backupValues := etcd.Spec.Backup + if backupValues.EtcdSnapshotTimeout != nil { + command = append(command, "--etcd-snapshot-timeout="+backupValues.EtcdSnapshotTimeout.Duration.String()) + } + storeValues := etcd.Spec.Backup.Store + if storeValues != nil { + if storeValues.Provider != nil { + command = append(command, "--storage-provider="+string(*storeValues.Provider)) + } + + if storeValues.Prefix != "" { + command = append(command, "--store-prefix="+storeValues.Prefix) + } + + if storeValues.Container != nil { + command = append(command, "--store-container="+*(storeValues.Container)) + } + } + + return command +} + +// SetupWithManager sets up manager with a new controller and ec as the reconcile.Reconciler +func (lc *LeaseController) SetupWithManager(ctx context.Context, mgr ctrl.Manager, workers int) error { + builder := ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{ + MaxConcurrentReconciles: workers, + }) + + return builder. + For(&druidv1alpha1.Etcd{}). + Owns(&coordinationv1.Lease{}). 
+ Complete(lc) +} diff --git a/main.go b/main.go index ea41a7185..3bd55ecb5 100644 --- a/main.go +++ b/main.go @@ -50,16 +50,17 @@ func init() { func main() { var ( - metricsAddr string - enableLeaderElection bool - leaderElectionID string - leaderElectionResourceLock string - etcdWorkers int - custodianWorkers int - custodianSyncPeriod time.Duration - disableLeaseCache bool - ignoreOperationAnnotation bool - enableBackupCompactionJobTempFS bool + metricsAddr string + enableLeaderElection bool + leaderElectionID string + leaderElectionResourceLock string + etcdWorkers int + custodianWorkers int + custodianSyncPeriod time.Duration + disableLeaseCache bool + eventsThreshold int64 + activeDeadlineDuration string + ignoreOperationAnnotation bool etcdMemberNotReadyThreshold time.Duration @@ -71,6 +72,8 @@ func main() { flag.IntVar(&etcdWorkers, "workers", 3, "Number of worker threads of the etcd controller.") flag.IntVar(&custodianWorkers, "custodian-workers", 3, "Number of worker threads of the custodian controller.") flag.DurationVar(&custodianSyncPeriod, "custodian-sync-period", 30*time.Second, "Sync period of the custodian controller.") + flag.Int64Var(&eventsThreshold, "events-threshold", 1000000, "Total number of events that can be allowed before a compaction job is triggered") + flag.StringVar(&activeDeadlineDuration, "active-deadline-duration", "3h", "Duration after which a running compaction job will be killed (Ex: \"300ms\", \"20s\", \"-1.5h\" or \"2h45m\")") flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") @@ -81,7 +84,6 @@ func main() { flag.BoolVar(&disableLeaseCache, "disable-lease-cache", false, "Disable cache for lease.coordination.k8s.io resources.") flag.BoolVar(&ignoreOperationAnnotation, "ignore-operation-annotation", true, "Ignore the operation annotation or not.") flag.DurationVar(&etcdMemberNotReadyThreshold, "etcd-member-notready-threshold", 5*time.Minute, "Threshold after which an etcd member is considered not ready if the status was unknown before.") - flag.BoolVar(&enableBackupCompactionJobTempFS, "enable-backup-compaction-job-tempfs", false, "Enable the backup compaction job to use tempfs as its volume mount") flag.Parse() @@ -108,9 +110,9 @@ func main() { os.Exit(1) } - etcd, err := controllers.NewEtcdReconcilerWithImageVector(mgr, enableBackupCompactionJobTempFS) + etcd, err := controllers.NewEtcdReconcilerWithImageVector(mgr) if err != nil { - setupLog.Error(err, "Unable to initialize controller with image vector") + setupLog.Error(err, "Unable to initialize etcd controller with image vector") os.Exit(1) } @@ -126,11 +128,31 @@ func main() { SyncPeriod: custodianSyncPeriod, }) + if err != nil { + setupLog.Error(err, "Unable to initialize etcd custodian controller with image vector") + os.Exit(1) + } + if err := custodian.SetupWithManager(ctx, mgr, custodianWorkers); err != nil { setupLog.Error(err, "Unable to create controller", "Controller", "Etcd Custodian") os.Exit(1) } + lc := controllers.NewLeaseController(mgr, controllersconfig.CompactionConfig{ + EventsThreshold: 1000000, + ActiveDeadlineDuration: "2m", + }) + + if err != nil { + setupLog.Error(err, "Unable to initialize lease controller") + os.Exit(1) + } + + if err := lc.SetupWithManager(ctx, mgr, custodianWorkers); err != nil { + setupLog.Error(err, "Unable to create 
controller", "Controller", "Lease") + os.Exit(1) + } + // +kubebuilder:scaffold:builder setupLog.Info("Starting manager") diff --git a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go index 12a44e874..8e3fad97b 100644 --- a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go +++ b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go @@ -120,6 +120,10 @@ type BackupSpec struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // CompactionResources defines the compute Resources required by compaction job. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + CompactionResources *corev1.ResourceRequirements `json:"compactionResources,omitempty"` // FullSnapshotSchedule defines the cron standard schedule for full snapshots. // +optional FullSnapshotSchedule *string `json:"fullSnapshotSchedule,omitempty"`