2 changes: 2 additions & 0 deletions charts/postgres-operator/crds/operatorconfigurations.yaml
@@ -392,6 +392,8 @@ spec:
type: string
wal_s3_bucket:
type: string
wal_az_storage_account:
type: string
logical_backup:
type: object
properties:
3 changes: 3 additions & 0 deletions charts/postgres-operator/values.yaml
@@ -266,6 +266,9 @@ configAwsOrGcp:
# GCS bucket to use for shipping WAL segments with WAL-E
# wal_gs_bucket: ""

# Azure Storage Account to use for shipping WAL segments with WAL-G
# wal_az_storage_account: ""

# configure K8s cron job managed by the operator
configLogicalBackup:
# image for pods of the logical backup job (example runs pg_dumpall)
57 changes: 57 additions & 0 deletions docs/administrator.md
@@ -808,6 +808,63 @@ pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
...
```

### Azure setup

To configure the operator on Azure, these prerequisites are needed:

* A storage account in the same region as the Kubernetes cluster.

The configuration parameters that we will be using are:

* `pod_environment_secret`
* `wal_az_storage_account`

1. Generate the K8s secret resource that will contain your storage account's
access key. You will need a copy of this secret in every namespace in which you
want to create postgresql clusters.

The latest version of WAL-G (v1.0) supports the use of a SAS token, but you'll
have to make do with the primary or secondary access key until the version of
WAL-G used by the postgres-operator is updated.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: psql-backup-creds
  namespace: default
type: Opaque
stringData:
  AZURE_STORAGE_ACCESS_KEY: <primary or secondary access key>
```

2. Set up a pod environment ConfigMap that instructs the operator to use WAL-G,
instead of WAL-E, for backup and restore.
```yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: pod-env-overrides
  namespace: postgres-operator-system
data:
  # Any env variable used by spilo can be added
  USE_WALG_BACKUP: "true"
  USE_WALG_RESTORE: "true"
  CLONE_USE_WALG_RESTORE: "true"
```

3. Set up your operator configuration values. With the `psql-backup-creds`
and `pod-env-overrides` resources applied to your cluster, ensure that the
operator's configuration is set up like the following:
```yml
...
aws_or_gcp:
  pod_environment_secret: "psql-backup-creds"
  pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
  wal_az_storage_account: "postgresbackupsbucket28302F2"  # name of the storage account where WAL-G stores WAL segments and backups
...
```
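
If you configure the operator through its ConfigMap rather than the
OperatorConfiguration CRD, the same settings look roughly like this (a sketch;
the key names mirror `manifests/configmap.yaml` in this change, and the
ConfigMap name assumes the default `postgres-operator`):
```yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  pod_environment_secret: "psql-backup-creds"
  pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
  wal_az_storage_account: "postgresbackupsbucket28302F2"
```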

### Restoring physical backups

If cluster members have to be (re)initialized restoring physical backups
6 changes: 6 additions & 0 deletions docs/reference/operator_parameters.md
@@ -553,6 +553,12 @@ yet officially supported.
[service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform).
The default is empty

* **wal_az_storage_account**
Azure Storage Account to use for shipping WAL segments with WAL-G. The
storage account has to exist and be accessible by Postgres pods. Note that only
the name of the storage account is required.
The default is empty.
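
A minimal sketch of how this parameter sits in the `aws_or_gcp` section of the
operator configuration (the account name is just a placeholder; compare the
default configuration manifest further down in this change):
```yaml
aws_or_gcp:
  wal_az_storage_account: "postgresbackupsbucket28302F2"
```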

* **log_s3_bucket**
S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS.
The bucket has to be present and accessible by Postgres pods. The default is
1 change: 1 addition & 0 deletions manifests/configmap.yaml
@@ -128,6 +128,7 @@ data:
# team_api_role_configuration: "log_statement:all"
# teams_api_url: http://fake-teams-api.default.svc.cluster.local
# toleration: ""
# wal_az_storage_account: ""
# wal_gs_bucket: ""
# wal_s3_bucket: ""
watched_namespace: "*" # listen to all namespaces
2 changes: 2 additions & 0 deletions manifests/operatorconfiguration.crd.yaml
@@ -384,6 +384,8 @@ spec:
type: string
log_s3_bucket:
type: string
wal_az_storage_account:
type: string
wal_gs_bucket:
type: string
wal_s3_bucket:
1 change: 1 addition & 0 deletions manifests/postgresql-operator-default-configuration.yaml
@@ -120,6 +120,7 @@ configuration:
# gcp_credentials: ""
# kube_iam_role: ""
# log_s3_bucket: ""
# wal_az_storage_account: ""
# wal_gs_bucket: ""
# wal_s3_bucket: ""
logical_backup:
1 change: 1 addition & 0 deletions pkg/apis/acid.zalan.do/v1/operator_configuration_type.go
@@ -131,6 +131,7 @@ type AWSGCPConfiguration struct {
AWSRegion string `json:"aws_region,omitempty"`
WALGSBucket string `json:"wal_gs_bucket,omitempty"`
GCPCredentials string `json:"gcp_credentials,omitempty"`
WALAZStorageAccount string `json:"wal_az_storage_account,omitempty"`
LogS3Bucket string `json:"log_s3_bucket,omitempty"`
KubeIAMRole string `json:"kube_iam_role,omitempty"`
AdditionalSecretMount string `json:"additional_secret_mount,omitempty"`
14 changes: 14 additions & 0 deletions pkg/cluster/k8sres.go
@@ -798,6 +798,12 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}

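// Ship WAL segments to the configured Azure Storage Account via WAL-G; the
// matching AZURE_STORAGE_ACCESS_KEY is expected to be provided through the
// pod environment secret (see docs/administrator.md).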
if c.OpConfig.WALAZStorageAccount != "" {
envVars = append(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}

if c.OpConfig.GCPCredentials != "" {
envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
}
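
Together with the pod environment overrides from the administrator guide, a
Spilo container should end up with roughly the following WAL-G-related
environment when `wal_az_storage_account` is set (a sketch assembled from this
diff and the docs above; the account name is the docs' placeholder):
```yaml
AZURE_STORAGE_ACCOUNT: "postgresbackupsbucket28302F2"   # set by the operator from wal_az_storage_account
AZURE_STORAGE_ACCESS_KEY: "<from the psql-backup-creds secret>"
WAL_BUCKET_SCOPE_PREFIX: ""
WAL_BUCKET_SCOPE_SUFFIX: "<derived from the cluster UID>"
USE_WALG_BACKUP: "true"                                 # from the pod-env-overrides ConfigMap
USE_WALG_RESTORE: "true"
CLONE_USE_WALG_RESTORE: "true"
```
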
@@ -1806,6 +1812,14 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
},
}
result = append(result, envs...)
} else if c.OpConfig.WALAZStorageAccount != "" {
envs := []v1.EnvVar{
{
Name: "CLONE_AZURE_STORAGE_ACCOUNT",
Value: c.OpConfig.WALAZStorageAccount,
},
}
result = append(result, envs...)
} else {
c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.")
}
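
On the clone path only the account name is injected; a sketch of what the new
branch contributes (the WAL-G restore switch still comes from the
pod-env-overrides ConfigMap described in the docs):
```yaml
CLONE_AZURE_STORAGE_ACCOUNT: "postgresbackupsbucket28302F2"  # from wal_az_storage_account
CLONE_USE_WALG_RESTORE: "true"                               # from the pod-env-overrides ConfigMap
```
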
1 change: 1 addition & 0 deletions pkg/controller/operator_config.go
@@ -145,6 +145,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole
result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket
result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials
result.WALAZStorageAccount = fromCRD.AWSGCP.WALAZStorageAccount
result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration
1 change: 1 addition & 0 deletions pkg/util/config/config.go
@@ -167,6 +167,7 @@ type Config struct {
KubeIAMRole string `name:"kube_iam_role"`
WALGSBucket string `name:"wal_gs_bucket"`
GCPCredentials string `name:"gcp_credentials"`
WALAZStorageAccount string `name:"wal_az_storage_account"`
AdditionalSecretMount string `name:"additional_secret_mount"`
AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"`
EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"`