From a21f4a5801994df23238aef479ce5a760e9080a3 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Fri, 12 Sep 2025 14:11:43 +0800 Subject: [PATCH 1/2] chore: add pg etcd example --- .../kubeblocks-for-postgresql/09-faqs.mdx | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 docs/en/preview/kubeblocks-for-postgresql/09-faqs.mdx diff --git a/docs/en/preview/kubeblocks-for-postgresql/09-faqs.mdx b/docs/en/preview/kubeblocks-for-postgresql/09-faqs.mdx new file mode 100644 index 00000000..6cb759fa --- /dev/null +++ b/docs/en/preview/kubeblocks-for-postgresql/09-faqs.mdx @@ -0,0 +1,71 @@ +--- +title: FAQs +description: FAQs of PostgreSQL +keywords: [KubeBlocks, PostgreSQL, Kubernetes Operator] +sidebar_position: 9 +sidebar_label: FAQs +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL FAQs + +## 1. Use ETCD as Patroni DCS + +KubeBlocks PostgreSQL uses the Kubernetes API itself as DCS (Distributed Config Store) by default. +But when the control plane is under extremely high load, it may lead to unexpected demotion of the primary replica. It is therefore recommended to use ETCD as DCS in such extreme cases. + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster-etcd + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: "16.4.0" + env: + - name: DCS_ENABLE_KUBERNETES_API # unset this env if you use zookeeper or etcd, default to empty + - name: ETCD3_HOST + value: 'etcd-cluster-etcd-headless.demo.svc.cluster.local:2379' # where is your etcd? + # - name: ZOOKEEPER_HOSTS + # value: 'myzk-zookeeper-0.myzk-zookeeper-headless.demo.svc.cluster.local:2181' # where is your zookeeper? 
+ replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +The key fields are: +- `DCS_ENABLE_KUBERNETES_API`: Unset this env to use ETCD or ZooKeeper as DCS +- `ETCD3_HOST`: The host of the ETCD cluster + +You can also use ZooKeeper as DCS by unsetting `DCS_ENABLE_KUBERNETES_API` and setting `ZOOKEEPER_HOSTS` to the host of the ZooKeeper cluster. + +KubeBlocks has ETCD and ZooKeeper Addons in the `kubeblocks-addons` repository. You can refer to the following links for more details. +- https://github.com/apecloud/kubeblocks-addons/tree/main/examples/etcd +- https://github.com/apecloud/kubeblocks-addons/tree/main/examples/zookeeper + +You can shell into one of the etcd containers and inspect the etcd data with etcdctl. + +```bash +etcdctl get /service --prefix +``` \ No newline at end of file From 69457c385ba4f2f1733a709de4af93953676c95c Mon Sep 17 00:00:00 2001 From: Shanshan Date: Fri, 12 Sep 2025 14:17:02 +0800 Subject: [PATCH 2/2] chore: update redis restore example --- .../06-restore-with-pitr.mdx | 38 ++++++++++++----- .../06-restore-with-pitr.mdx | 41 +++++++++++++------ 2 files changed, 56 insertions(+), 23 deletions(-) diff --git a/docs/en/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx b/docs/en/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx index e9df8d35..0248dc19 100644 --- a/docs/en/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx +++ b/docs/en/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx @@ -87,7 +87,7 @@ Apply this YAML configuration: apiVersion: apps.kubeblocks.io/v1 kind: Cluster metadata: - name: pg-restore-pitr + name: redis-restore-pitr namespace: demo annotations: # NOTE: replace with the continuouse backup name @@ -99,16 +99,34 @@ 
spec: topology: replication componentSpecs: - name: redis - serviceVersion: "14.7.2" - disableExporter: true - replicas: 1 + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 resources: limits: - cpu: "0.5" - memory: "0.5Gi" + cpu: '0.5' + memory: 0.5Gi requests: - cpu: "0.5" - memory: "0.5Gi" + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi volumeClaimTemplates: - name: data spec: @@ -142,7 +160,7 @@ metadata: name: redis-replication-restore namespace: demo spec: - clusterName: redis-replication-restore + clusterName: redis-restore-pitr force: false restore: backupName: @@ -167,7 +185,7 @@ To remove all created resources, delete the Redis cluster along with its namespa ```bash kubectl delete cluster redis-replication -n demo -kubectl delete cluster redis-replication-restore -n demo +kubectl delete cluster redis-restore-pitr -n demo kubectl delete ns demo ``` diff --git a/docs/en/release-1_0/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx b/docs/en/release-1_0/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx index 3a2fbf90..acafe88c 100644 --- a/docs/en/release-1_0/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx +++ b/docs/en/release-1_0/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx @@ -87,7 +87,7 @@ Apply this YAML configuration: apiVersion: apps.kubeblocks.io/v1 kind: Cluster metadata: - name: pg-restore-pitr + name: redis-restore-pitr namespace: demo annotations: # NOTE: replace with the continuouse backup name @@ -99,19 +99,34 @@ spec: topology: replication componentSpecs: - name: redis - serviceVersion: "14.7.2" - disableExporter: true - labels: - # NOTE: update the label accordingly - apps.kubeblocks.postgres.patroni/scope: 
pg-restore-pitr-redis - replicas: 1 + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 resources: limits: - cpu: "0.5" - memory: "0.5Gi" + cpu: '0.5' + memory: 0.5Gi requests: - cpu: "0.5" - memory: "0.5Gi" + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi volumeClaimTemplates: - name: data spec: @@ -145,7 +160,7 @@ metadata: name: redis-replication-restore namespace: demo spec: - clusterName: redis-replication-restore + clusterName: redis-restore-pitr force: false restore: backupName: @@ -170,7 +185,7 @@ To remove all created resources, delete the Redis cluster along with its namespa ```bash kubectl delete cluster redis-replication -n demo -kubectl delete cluster redis-restore-pitr -n demo +kubectl delete cluster redis-restore-pitr -n demo kubectl delete ns demo ```