chore: add initial readreplica support to prebackuppod
shreddedbacon committed Jul 24, 2023
1 parent 34b22e4 commit 799f813
Showing 6 changed files with 64 additions and 7 deletions.
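
This change wires a BACKUP_DB_READREPLICAS value from the lagoon-env ConfigMap into the dbaas pre-backup pods and updates the mariadb and postgres backup commands to dump from a read replica when one is advertised. A minimal sketch of the host selection those commands now perform, using hypothetical host names for illustration:

BACKUP_DB_HOST="primary.mariadb.svc"                                  # hypothetical primary host
BACKUP_DB_READREPLICAS="replica-1.mariadb.svc,replica-2.mariadb.svc"  # hypothetical comma-separated replica list
# when a replica list is present, the first entry replaces the primary as the dump target
if [ ! -z "$BACKUP_DB_READREPLICAS" ]; then
  BACKUP_DB_HOST=$(echo "$BACKUP_DB_READREPLICAS" | cut -d ',' -f1)
fi
echo "$BACKUP_DB_HOST"   # prints replica-1.mariadb.svc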
35 changes: 33 additions & 2 deletions internal/templating/backups/template_prebackuppod.go
@@ -11,6 +11,7 @@ import (

k8upv1 "github.com/k8up-io/k8up/v2/api/v1"
k8upv1alpha1 "github.com/vshn/k8up/api/v1alpha1"
v1 "k8s.io/api/core/v1"
apivalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metavalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
@@ -89,6 +90,20 @@ func GeneratePreBackupPod(
k8upPBPSpec.Pod.ObjectMeta.Labels["prebackuppod"] = serviceValues.Name
prebackuppod.Spec = k8upPBPSpec

if prebackuppod.Spec.Pod.Spec.Containers[0].EnvFrom == nil {
prebackuppod.Spec.Pod.Spec.Containers[0].Env = append(prebackuppod.Spec.Pod.Spec.Containers[0].Env, v1.EnvVar{
Name: "BACKUP_DB_READREPLICAS",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
Key: fmt.Sprintf("%s_READREPLICAS", varFix(serviceValues.OverrideName)),
LocalObjectReference: v1.LocalObjectReference{
Name: "lagoon-env",
},
},
},
})
}

for key, value := range additionalLabels {
prebackuppod.ObjectMeta.Labels[key] = value
}
@@ -159,6 +174,20 @@ func GeneratePreBackupPod(
k8upPBPSpec.Pod.ObjectMeta.Labels["prebackuppod"] = serviceValues.Name
prebackuppod.Spec = k8upPBPSpec

if prebackuppod.Spec.Pod.Spec.Containers[0].EnvFrom == nil && serviceValues.DBaasReadReplica {
prebackuppod.Spec.Pod.Spec.Containers[0].Env = append(prebackuppod.Spec.Pod.Spec.Containers[0].Env, v1.EnvVar{
Name: "BACKUP_DB_READREPLICAS",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
Key: fmt.Sprintf("%s_READREPLICAS", serviceValues.OverrideName),
LocalObjectReference: v1.LocalObjectReference{
Name: "lagoon-env",
},
},
},
})
}

for key, value := range additionalLabels {
prebackuppod.ObjectMeta.Labels[key] = value
}
@@ -228,7 +257,9 @@ type PreBackupPods map[string]string
// this is just the first run at doing this, once the service template generator is introduced, this will need to be re-evaluated
var preBackupPodSpecs = PreBackupPods{
"mariadb-dbaas": `backupCommand: >-
/bin/sh -c "dump=$(mktemp) && mysqldump --max-allowed-packet=500M --events
/bin/sh -c "if [ ! -z $BACKUP_DB_READREPLICAS ]; then
BACKUP_DB_HOST=$(echo $BACKUP_DB_READREPLICAS | cut -d ',' -f1);
fi && dump=$(mktemp) && mysqldump --max-allowed-packet=500M --events
--routines --quick --add-locks --no-autocommit --single-transaction
--no-create-db --no-data -h $BACKUP_DB_HOST -u $BACKUP_DB_USERNAME
-p$BACKUP_DB_PASSWORD $BACKUP_DB_DATABASE > $dump && mysqldump
@@ -268,7 +299,7 @@ pod:
image: imagecache.amazeeio.cloud/amazeeio/alpine-mysql-client
imagePullPolicy: Always
name: {{ .Name }}-prebackuppod`,
"postgres-dbaas": `backupCommand: /bin/sh -c "PGPASSWORD=$BACKUP_DB_PASSWORD pg_dump --host=$BACKUP_DB_HOST --port=$BACKUP_DB_PORT --dbname=$BACKUP_DB_NAME --username=$BACKUP_DB_USERNAME --format=t -w"
"postgres-dbaas": `backupCommand: /bin/sh -c "if [ ! -z $BACKUP_DB_READREPLICAS ]; then BACKUP_DB_HOST=$(echo $BACKUP_DB_READREPLICAS | cut -d ',' -f1); fi && PGPASSWORD=$BACKUP_DB_PASSWORD pg_dump --host=$BACKUP_DB_HOST --port=$BACKUP_DB_PORT --dbname=$BACKUP_DB_NAME --username=$BACKUP_DB_USERNAME --format=t -w"
fileExtension: .{{ .Name }}.tar
pod:
spec:
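
The injected env var reads its value from the lagoon-env ConfigMap under a key built from the service's override name plus a _READREPLICAS suffix (the first hunk passes the name through varFix). Judging from the generated fixtures below, that key derivation appears to uppercase the name and turn dashes into underscores; a rough shell illustration of that assumed convention:

name="mariadb-database"                                  # hypothetical service override name
key="$(echo "$name" | tr 'a-z' 'A-Z' | tr '-' '_')_READREPLICAS"
echo "$key"                                              # prints MARIADB_DATABASE_READREPLICAS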
3 changes: 3 additions & 0 deletions internal/templating/backups/template_prebackuppod_test.go
@@ -38,6 +38,7 @@ func TestGeneratePreBackupPod(t *testing.T) {
OverrideName: "mariadb-database",
Type: "mariadb-dbaas",
DBaaSEnvironment: "development",
DBaasReadReplica: true,
},
},
Backup: generator.BackupConfiguration{
@@ -65,6 +66,7 @@ func TestGeneratePreBackupPod(t *testing.T) {
OverrideName: "postgres-database",
Type: "postgres-dbaas",
DBaaSEnvironment: "development",
DBaasReadReplica: true,
},
},
Backup: generator.BackupConfiguration{
@@ -92,6 +94,7 @@ func TestGeneratePreBackupPod(t *testing.T) {
OverrideName: "mongodb-database",
Type: "mongodb-dbaas",
DBaaSEnvironment: "development",
DBaasReadReplica: true,
},
},
Backup: generator.BackupConfiguration{
(test fixture: mariadb-database pre-backup pod; file path not shown)
@@ -15,7 +15,8 @@ metadata:
prebackuppod: mariadb-database
name: mariadb-database-prebackuppod
spec:
backupCommand: /bin/sh -c "dump=$(mktemp) && mysqldump --max-allowed-packet=500M
backupCommand: /bin/sh -c "if [ ! -z $BACKUP_DB_READREPLICAS ]; then BACKUP_DB_HOST=$(echo
$BACKUP_DB_READREPLICAS | cut -d ',' -f1); fi && dump=$(mktemp) && mysqldump --max-allowed-packet=500M
--events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db
--no-data -h $BACKUP_DB_HOST -u $BACKUP_DB_USERNAME -p$BACKUP_DB_PASSWORD $BACKUP_DB_DATABASE
> $dump && mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks
@@ -58,6 +59,11 @@ spec:
configMapKeyRef:
key: MARIADB_DATABASE_DATABASE
name: lagoon-env
- name: BACKUP_DB_READREPLICAS
valueFrom:
configMapKeyRef:
key: MARIADB_DATABASE_READREPLICAS
name: lagoon-env
image: imagecache.amazeeio.cloud/amazeeio/alpine-mysql-client
imagePullPolicy: Always
name: mariadb-database-prebackuppod
(test fixture: postgres-database pre-backup pod; file path not shown)
@@ -15,9 +15,10 @@ metadata:
prebackuppod: postgres-database
name: postgres-database-prebackuppod
spec:
backupCommand: /bin/sh -c "PGPASSWORD=$BACKUP_DB_PASSWORD pg_dump --host=$BACKUP_DB_HOST
--port=$BACKUP_DB_PORT --dbname=$BACKUP_DB_NAME --username=$BACKUP_DB_USERNAME
--format=t -w"
backupCommand: /bin/sh -c "if [ ! -z $BACKUP_DB_READREPLICAS ]; then BACKUP_DB_HOST=$(echo
$BACKUP_DB_READREPLICAS | cut -d ',' -f1); fi && PGPASSWORD=$BACKUP_DB_PASSWORD
pg_dump --host=$BACKUP_DB_HOST --port=$BACKUP_DB_PORT --dbname=$BACKUP_DB_NAME
--username=$BACKUP_DB_USERNAME --format=t -w"
fileExtension: .postgres-database.tar
pod:
metadata:
@@ -54,6 +55,11 @@ spec:
configMapKeyRef:
key: POSTGRES_DATABASE_DATABASE
name: lagoon-env
- name: BACKUP_DB_READREPLICAS
valueFrom:
configMapKeyRef:
key: POSTGRES_DATABASE_READREPLICAS
name: lagoon-env
image: imagecache.amazeeio.cloud/uselagoon/php-8.0-cli
imagePullPolicy: Always
name: postgres-database-prebackuppod
(test fixture: mongodb-database pre-backup pod; file path not shown)
@@ -53,6 +53,11 @@ spec:
configMapKeyRef:
key: MONGODB_DATABASE_DATABASE
name: lagoon-env
- name: BACKUP_DB_READREPLICAS
valueFrom:
configMapKeyRef:
key: MONGODB_DATABASE_READREPLICAS
name: lagoon-env
image: imagecache.amazeeio.cloud/uselagoon/php-8.0-cli
imagePullPolicy: Always
name: mongodb-database-prebackuppod
(test fixture: mariadb pre-backup pod; file path not shown)
@@ -15,7 +15,8 @@ metadata:
prebackuppod: mariadb
name: mariadb-prebackuppod
spec:
backupCommand: /bin/sh -c "dump=$(mktemp) && mysqldump --max-allowed-packet=500M
backupCommand: /bin/sh -c "if [ ! -z $BACKUP_DB_READREPLICAS ]; then BACKUP_DB_HOST=$(echo
$BACKUP_DB_READREPLICAS | cut -d ',' -f1); fi && dump=$(mktemp) && mysqldump --max-allowed-packet=500M
--events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db
--no-data -h $BACKUP_DB_HOST -u $BACKUP_DB_USERNAME -p$BACKUP_DB_PASSWORD $BACKUP_DB_DATABASE
> $dump && mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks
@@ -58,6 +59,11 @@ spec:
configMapKeyRef:
key: MARIADB_DATABASE
name: lagoon-env
- name: BACKUP_DB_READREPLICAS
valueFrom:
configMapKeyRef:
key: MARIADB_READREPLICAS
name: lagoon-env
image: imagecache.amazeeio.cloud/amazeeio/alpine-mysql-client
imagePullPolicy: Always
name: mariadb-prebackuppod
