From 19210aa36d8f674846bd7b0e81a1edd10473633f Mon Sep 17 00:00:00 2001 From: Peach Leach Date: Thu, 18 Sep 2025 15:55:59 -0400 Subject: [PATCH 1/2] Forward port changes to 25.4 Forward port changes to 25.4 --- .../physical-cluster-replication.md | 6 +-- .../interface-virtual-cluster.md | 2 +- src/current/v25.4/create-virtual-cluster.md | 2 +- src/current/v25.4/failover-replication.md | 45 +++++++++-------- ...physical-cluster-replication-monitoring.md | 1 - .../physical-cluster-replication-overview.md | 9 ++-- ...-cluster-replication-technical-overview.md | 8 ++-- .../set-up-physical-cluster-replication.md | 48 +++++++++---------- 8 files changed, 58 insertions(+), 63 deletions(-) diff --git a/src/current/_includes/v25.4/known-limitations/physical-cluster-replication.md b/src/current/_includes/v25.4/known-limitations/physical-cluster-replication.md index a00c34fc878..bcb768cbc87 100644 --- a/src/current/_includes/v25.4/known-limitations/physical-cluster-replication.md +++ b/src/current/_includes/v25.4/known-limitations/physical-cluster-replication.md @@ -1,6 +1,4 @@ - Physical cluster replication is supported in: - CockroachDB {{ site.data.products.core }} clusters on v23.2 or later. The primary cluster can be a [new]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-1-create-the-primary-cluster) or [existing]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#set-up-pcr-from-an-existing-cluster) cluster. The standby cluster must be a [new cluster started with the `--virtualized-empty` flag]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-2-create-the-standby-cluster). - - [CockroachDB {{ site.data.products.advanced }} in clusters]({% link cockroachcloud/physical-cluster-replication.md %}) on v24.3 or later. 
-- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}) in CockroachDB {{ site.data.products.core }}. -- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}). -- Before failover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}). + - [CockroachDB {{ site.data.products.advanced }} clusters]({% link cockroachcloud/physical-cluster-replication.md %}) on v24.3 or later. +- In CockroachDB {{ site.data.products.core }}, the primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}) in order to respect data placement configurations. diff --git a/src/current/_includes/v25.4/physical-replication/interface-virtual-cluster.md b/src/current/_includes/v25.4/physical-replication/interface-virtual-cluster.md index 02890c3fc83..6bfae39096e 100644 --- a/src/current/_includes/v25.4/physical-replication/interface-virtual-cluster.md +++ b/src/current/_includes/v25.4/physical-replication/interface-virtual-cluster.md @@ -1,2 +1,2 @@ - The system virtual cluster manages the cluster's control plane and the replication of the cluster's data. Admins connect to the system virtual cluster to configure and manage the underlying CockroachDB cluster, set up PCR, create and manage a virtual cluster, and observe metrics and logs for the CockroachDB cluster and each virtual cluster. -- Each other virtual cluster manages its own data plane. Users connect to a virtual cluster by default, rather than the system virtual cluster. To connect to the system virtual cluster, the connection string must be modified. 
Virtual clusters contain user data and run application workloads. When PCR is enabled, the non-system virtual cluster on both primary and secondary clusters is named `main`. +- The application virtual cluster manages the cluster’s data plane. Application virtual clusters contain user data and run application workloads. diff --git a/src/current/v25.4/create-virtual-cluster.md b/src/current/v25.4/create-virtual-cluster.md index c1ca09b2200..8847f42e5d9 100644 --- a/src/current/v25.4/create-virtual-cluster.md +++ b/src/current/v25.4/create-virtual-cluster.md @@ -62,7 +62,7 @@ To form a connection string similar to the example, include the following values Value | Description ----------------+------------ -`{replication user}` | The user on the primary cluster that has the `REPLICATION` system privilege. Refer to the [Create a replication user and password]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#create-a-replication-user-and-password) for more detail. +`{replication user}` | The user on the primary cluster that has the `REPLICATION` system privilege. Refer to [Create a user with replication privileges]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#create-a-user-with-replication-privileges) for more detail. `{password}` | The replication user's password. `{node ID or hostname}` | The node IP address or hostname of any node from the primary cluster. `options=ccluster=system` | The parameter to connect to the system virtual cluster on the primary cluster. diff --git a/src/current/v25.4/failover-replication.md b/src/current/v25.4/failover-replication.md index 75788baf1c1..dfe3edaf194 100644 --- a/src/current/v25.4/failover-replication.md +++ b/src/current/v25.4/failover-replication.md @@ -5,13 +5,9 @@ toc: true key: cutover-replication.html --- -{{site.data.alerts.callout_info}} -Physical cluster replication is supported in CockroachDB {{ site.data.products.core }} clusters. 
-{{site.data.alerts.end}} +_Failover_ in [**physical cluster replication (PCR)**]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) allows you to move application traffic from the active primary cluster to the passive standby cluster. When you complete the replication stream to initiate a failover, the job stops replicating data from the primary, sets the standby [virtual cluster]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}) to a point in time (in the past or future) where all ingested data is consistent, and then makes the standby virtual cluster ready to accept traffic. -_Failover_ in [**physical cluster replication (PCR)**]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) allows you to switch from the active primary cluster to the passive standby cluster that has ingested replicated data. When you complete the replication stream to initiate a failover, the job stops replicating data from the primary, sets the standby [virtual cluster]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}) to a point in time (in the past or future) where all ingested data is consistent, and then makes the standby virtual cluster ready to accept traffic. - -_Failback_ in PCR switches operations back to the original primary cluster (or a new cluster) after a failover event. When you initiate a failback, the job ensures the original primary is up to date with writes from the standby that happened after failover. The original primary cluster is then set as ready to accept application traffic once again. +After a failover event, you may want to return your operations to the original primary cluster (or a new cluster). _Failback_ in PCR does this by replicating new application traffic back onto the original primary cluster. 
When you initiate a failback, the job ensures the original primary is up to date with writes from the standby that happened after failover. The original primary cluster is then set as ready to accept application traffic once again. This page describes: @@ -21,8 +17,8 @@ This page describes: - After the PCR stream used an existing cluster as the primary cluster. - [**Job management**](#job-management) after a failover or failback. -{{site.data.alerts.callout_danger}} -Failover and failback do **not** redirect traffic automatically to the standby cluster. Once the failover or failback is complete, you must redirect application traffic to the standby (new) cluster. If you do not redirect traffic manually, writes to the primary (original) cluster may be lost. +{{site.data.alerts.callout_info}} +Failover and failback do **not** redirect application traffic automatically. Once the failover or failback is complete, you must redirect application traffic to the newly promoted cluster. {{site.data.alerts.end}} ## Failover @@ -38,16 +34,19 @@ During PCR, jobs running on the primary cluster will replicate to the standby cl ### Step 1. Initiate the failover -To initiate a failover to the standby cluster, you can specify the point in time for the standby's promotion in the following ways. That is, the standby cluster's live data at the point of failover. Refer to the following sections for steps: +To initiate a failover to the standby cluster, specify the point in time for its promotion. At failover, the standby cluster’s data will reflect the state of the primary at the specified moment. Refer to the following sections for steps: -- [`LATEST`](#fail-over-to-the-most-recent-replicated-time): The most recent replicated timestamp. +- [`LATEST`](#fail-over-to-the-most-recent-replicated-time): The most recent replicated timestamp. This minimizes any data loss from the replication lag in asynchronous replication. 
- [Point-in-time](#fail-over-to-a-point-in-time): - - Past: A past timestamp within the [failover window]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process). + - Past: A past timestamp within the [failover window]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) of up to 4 hours in the past. + {{site.data.alerts.callout_success}} + Failing over to a past point in time is useful if you need to recover from a recent human error. + {{site.data.alerts.end}} - Future: A future timestamp for planning a failover. #### Fail over to the most recent replicated time -To initiate a failover to the most recent replicated timestamp, you can specify `LATEST` when you start the failover. The latest replicated time may be behind the actual time if there is [_replication lag_]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) in the stream. Replication lag is the time between the most up-to-date replicated time and the actual time. +To initiate a failover to the most recent replicated timestamp, specify `LATEST`. Due to [_replication lag_]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process), the most recent replicated time may be behind the current actual time. Replication lag is the time difference between the most recent replicated time and the actual time. 1. To view the current replication timestamp, use: @@ -95,7 +94,7 @@ You can control the point in time that the PCR stream will fail over to. SHOW VIRTUAL CLUSTER main WITH REPLICATION STATUS; ~~~ - The `retained_time` response provides the earliest time to which you can fail over. + The `retained_time` response provides the earliest time to which you can fail over. This is up to 4 hours in the past. 
~~~ id | name | source_tenant_name | source_cluster_uri | retained_time | replicated_time | replication_lag | failover_time | status @@ -174,10 +173,10 @@ To enable PCR again, from the new primary to the original primary (or a complete ## Failback -After failing over to the standby cluster, you may need to fail back to the original primary-standby cluster setup cluster to serve your application. Depending on the configuration of the primary cluster in the original PCR stream, use one of the following workflows: +After failing over to the standby cluster, you may want to return to your original configuration by failing back to the original primary-standby cluster setup. Depending on the configuration of the primary cluster in the original PCR stream, use one of the following workflows: -- [From the original standby cluster (after it was promoted during failover) to the original primary cluster](#fail-back-to-the-original-primary-cluster). -- [After the PCR stream used an existing cluster as the primary cluster](#fail-back-after-pcr-from-an-existing-cluster). +- [From the original standby cluster (after it was promoted during failover) to the original primary cluster](#fail-back-to-the-original-primary-cluster). If this failback is initiated within 24 hours of the failover, PCR replicates the net-new changes from the standby cluster to the primary cluster, rather than fully replacing the existing data in the primary cluster. +- [After the PCR stream used an existing cluster as the primary cluster](#fail-back-after-replicating-from-an-existing-primary-cluster). {{site.data.alerts.callout_info}} To move back to a different cluster that was not involved in the original PCR stream, set up a new PCR stream following the PCR [setup]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}) guide. @@ -208,7 +207,7 @@ This section illustrates the steps to fail back to the original primary cluster ALTER VIRTUAL CLUSTER {cluster_a} STOP SERVICE; ~~~ -1. 
Open another terminal window and generate a connection string for **Cluster B** using `cockroach encode-uri`: +1. Open another terminal window and generate a connection string for **Cluster B** using [`cockroach encode-uri`]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-3-manage-cluster-certificates-and-generate-connection-strings): {% include_cached copy-clipboard.html %} ~~~ shell @@ -279,7 +278,7 @@ This section illustrates the steps to fail back to the original primary cluster ALTER VIRTUAL CLUSTER {cluster_a} COMPLETE REPLICATION TO LATEST; ~~~ - The `failover_time` is the timestamp at which the replicated data is consistent. The cluster will revert any replicated data above this timestamp to ensure that the standby is consistent with the primary at that timestamp: + After the failover has successfully completed, it returns a `failover_time` timestamp, representing the time at which the replicated data is consistent. Note that the cluster reverts any replicated data above the `failover_time` to ensure that the standby is consistent with the primary at that time: ~~~ failover_time @@ -302,13 +301,13 @@ This section illustrates the steps to fail back to the original primary cluster SET CLUSTER SETTING server.controller.default_target_cluster='{cluster_a}'; ~~~ -At this point, **Cluster A** is once again the primary and **Cluster B** is once again the standby. The clusters are entirely independent. To direct application traffic to the primary (**Cluster A**), you will need to use your own network load balancers, DNS servers, or other network configuration to direct application traffic to **Cluster A**. To enable PCR again, from the primary to the standby (or a completely different cluster), refer to [Set Up Physical Cluster Replication]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}). +At this point, **Cluster A** has caught up to **Cluster B**. The clusters are entirely independent. 
To enable PCR again from the primary to the standby, refer to [Set Up Physical Cluster Replication]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}). -### Fail back after PCR from an existing cluster +### Fail back after replicating from an existing primary cluster You can replicate data from an existing CockroachDB cluster that does not have [cluster virtualization]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}) enabled to a standby cluster with cluster virtualization enabled. For instructions on setting up a PCR in this way, refer to [Set up PCR from an existing cluster]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#set-up-pcr-from-an-existing-cluster). -After a [failover](#failover) to the standby cluster, you may want to then set up PCR from the original standby cluster, which is now the primary, to another cluster, which will become the standby. There are couple of ways to set up a new standby, and some considerations. +After a [failover](#failover) to the standby cluster, you may want to set up PCR from the original standby cluster, which is now the primary, to another cluster, which will become the standby. There are multiple ways to set up a new standby, and some considerations. In the example, the clusters are named for reference: @@ -324,11 +323,11 @@ In the example, the clusters are named for reference: ## Job management -During PCR, jobs running on the primary cluster will replicate to the standby cluster. Once you have [completed a failover](#step-2-complete-the-failover) (or a [failback](#failback)), refer to the following sections for details on resuming jobs on the promoted cluster. +During PCR, jobs running on the primary cluster replicate to the standby cluster. Once you have [completed a failover](#step-2-complete-the-failover) (or a [failback](#failback)), refer to the following sections for details on resuming jobs on the promoted cluster. 
### Backup schedules -[Backup schedules]({% link {{ page.version.version }}/manage-a-backup-schedule.md %}) will pause after failover on the promoted cluster. Take the following steps to resume jobs: +[Backup schedules]({% link {{ page.version.version }}/manage-a-backup-schedule.md %}) pause after failover on the promoted standby cluster. Take the following steps to resume jobs: 1. Verify that there are no other schedules running backups to the same [collection of backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#backup-collections), i.e., the schedule that was running on the original primary cluster. 1. [Resume]({% link {{ page.version.version }}/resume-schedules.md %}) the backup schedule on the promoted cluster. diff --git a/src/current/v25.4/physical-cluster-replication-monitoring.md b/src/current/v25.4/physical-cluster-replication-monitoring.md index 755f166ea80..68c2a8273ea 100644 --- a/src/current/v25.4/physical-cluster-replication-monitoring.md +++ b/src/current/v25.4/physical-cluster-replication-monitoring.md @@ -55,7 +55,6 @@ You can use Prometheus and Alertmanager to track and alert on PCR metrics. Refer We recommend tracking the following metrics: - `physical_replication.logical_bytes`: The logical bytes (the sum of all keys and values) ingested by all PCR jobs. -- `physical_replication.sst_bytes`: The [SST]({% link {{ page.version.version }}/architecture/storage-layer.md %}#ssts) bytes (compressed) sent to the KV layer by all PCR jobs. - `physical_replication.replicated_time_seconds`: The [replicated time]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) of the physical replication stream in seconds since the Unix epoch. 
## Data verification diff --git a/src/current/v25.4/physical-cluster-replication-overview.md b/src/current/v25.4/physical-cluster-replication-overview.md index b66b219c1c8..2ed567f7596 100644 --- a/src/current/v25.4/physical-cluster-replication-overview.md +++ b/src/current/v25.4/physical-cluster-replication-overview.md @@ -31,7 +31,7 @@ You can use PCR to: - **Transactional consistency**: Avoid conflicts in data after recovery; the replication completes to a transactionally consistent state. - **Improved RPO and RTO**: Depending on workload and deployment configuration, [replication lag]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}) between the primary and standby is generally in the tens-of-seconds range. The failover process from the primary cluster to the standby should typically happen within five minutes when completing a failover to the latest replicated time using [`LATEST`]({% link {{ page.version.version }}/alter-virtual-cluster.md %}#synopsis). - **Failover to a timestamp in the past or the future**: In the case of logical disasters or mistakes, you can [fail over]({% link {{ page.version.version }}/failover-replication.md %}) from the primary to the standby cluster to a timestamp in the past. This means that you can return the standby to a timestamp before the mistake was replicated to the standby. Furthermore, you can plan a failover by specifying a timestamp in the future. -- **Fast failback**: Switch back from the promoted standby cluster to the original primary cluster after a failover event without an initial scan. +- **Fast failback**: Switch back from the promoted standby cluster to the original primary cluster after a failover event by replicating net-new changes rather than fully replacing existing data for an initial scan. - **Read from standby cluster**: You can configure PCR to allow `SELECT` queries on the standby cluster. 
For more details, refer to [Start a PCR stream with read from standby]({% link {{ page.version.version }}/create-virtual-cluster.md %}#start-a-pcr-stream-with-read-from-standby). - **Monitoring**: To monitor the replication's initial progress, current status, and performance, you can use metrics available in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) and [Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). For more details, refer to [Physical Cluster Replication Monitoring]({% link {{ page.version.version }}/physical-cluster-replication-monitoring.md %}). @@ -48,6 +48,7 @@ Frequent large schema changes or imports may cause a significant spike in [repli This section is a quick overview of the initial requirements to start a replication stream. For more comprehensive guides, refer to: +- [Cluster Virtualization Overview]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}): for information on enabling cluster virtualization, a requirement for setting up PCR. - [Set Up Physical Cluster Replication]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}): for a tutorial on how to start a replication stream. - [Physical Cluster Replication Monitoring]({% link {{ page.version.version }}/physical-cluster-replication-monitoring.md %}): for detail on metrics and observability into a replication stream. - [Fail Over from a Primary Cluster to a Standby Cluster]({% link {{ page.version.version }}/failover-replication.md %}): for a guide on how to complete a replication stream and fail over to the standby cluster. @@ -68,8 +69,8 @@ Statement | Action ## Cluster versions and upgrades -{{site.data.alerts.callout_danger}} -The standby cluster must be at the same version as, or one version ahead of, the primary's virtual cluster. 
+{{site.data.alerts.callout_info}} +The entire standby cluster must be at the same version as, or one version ahead of, the primary's virtual cluster. {{site.data.alerts.end}} When PCR is enabled, upgrade with the following procedure. This upgrades the standby cluster before the primary cluster. Within the primary and standby CockroachDB clusters, the system virtual cluster must be at a cluster version greater than or equal to the virtual cluster: @@ -80,8 +81,6 @@ When PCR is enabled, upgrade with the following procedure. This upgrades the sta 1. [Finalize]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}#finalize-a-major-version-upgrade-manually) the upgrade on the standby's virtual cluster. 1. [Finalize]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}#finalize-a-major-version-upgrade-manually) the upgrade on the primary's virtual cluster. -The standby cluster must be at the same version as, or one version ahead of, the primary's virtual cluster at the time of [failover]({% link {{ page.version.version }}/failover-replication.md %}). - ## Demo video Learn how to use PCR to meet your RTO and RPO requirements with the following demo: diff --git a/src/current/v25.4/physical-cluster-replication-technical-overview.md b/src/current/v25.4/physical-cluster-replication-technical-overview.md index 8c576d8f8dd..fdcbc96f4cf 100644 --- a/src/current/v25.4/physical-cluster-replication-technical-overview.md +++ b/src/current/v25.4/physical-cluster-replication-technical-overview.md @@ -5,11 +5,11 @@ toc: true docs_area: manage --- -[**Physical cluster replication (PCR)**]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) automatically and continuously streams data from an active _primary_ CockroachDB cluster to a passive _standby_ cluster. 
Each cluster contains: a _system virtual cluster_ and an application [virtual cluster]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}) during the PCR stream: +[**Physical cluster replication (PCR)**]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) continuously and asynchronously replicates data from an active _primary_ CockroachDB cluster to a passive _standby_ cluster. When both clusters are virtualized, each cluster contains a _system virtual cluster_ and an application [virtual cluster]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}) during the PCR stream: {% include {{ page.version.version }}/physical-replication/interface-virtual-cluster.md %} -This separation of concerns means that the replication stream can operate without affecting work happening in a virtual cluster. +If you utilize the [read on standby](#start-up-sequence-with-read-on-standby) feature in PCR, the standby cluster has an additional reader virtual cluster that safely serves read requests on the replicating virtual cluster. ### PCR stream start-up sequence @@ -20,7 +20,7 @@ This separation of concerns means that the replication stream can operate withou The stream initialization proceeds as follows: -1. The standby's consumer job connects via its system virtual cluster to the primary cluster and starts the primary cluster's physical stream producer job. +1. The standby's consumer job connects to the primary cluster via the standby's system virtual cluster and starts the primary cluster's `REPLICATION STREAM PRODUCER` job. 1. The primary cluster chooses a timestamp at which to start the physical replication stream. 
Data on the primary is protected from [garbage collection]({% link {{ page.version.version }}/architecture/storage-layer.md %}#garbage-collection) until it is replicated to the standby using a [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps). 1. The primary cluster returns the timestamp and a [job ID]({% link {{ page.version.version }}/show-jobs.md %}#response) for the replication job. 1. The standby cluster retrieves a list of all nodes in the primary cluster. It uses this list to distribute work across all nodes in the standby cluster. @@ -53,7 +53,7 @@ If the primary cluster does not receive replicated time information from the sta ### Failover and promotion process -The tracked replicated time and the advancing protected timestamp allows the replication stream to also track _retained time_, which is a timestamp in the past indicating the lower bound that the replication stream could fail over to. Therefore, the _failover window_ for a replication job falls between the retained time and the replicated time. +The tracked replicated time and the advancing protected timestamp allow the replication stream to also track _retained time_, which is a timestamp in the past indicating the lower bound that the replication stream could fail over to. The retained time can be up to 4 hours in the past, due to the protected timestamp. Therefore, the _failover window_ for a replication job falls between the retained time and the replicated time. Timeline showing how the failover window is between the retained time and replicated time. 
diff --git a/src/current/v25.4/set-up-physical-cluster-replication.md b/src/current/v25.4/set-up-physical-cluster-replication.md index b69c3752065..12949d489cc 100644 --- a/src/current/v25.4/set-up-physical-cluster-replication.md +++ b/src/current/v25.4/set-up-physical-cluster-replication.md @@ -5,7 +5,11 @@ toc: true docs_area: manage --- -In this tutorial, you will set up [**physical cluster replication (PCR)**]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) between a primary cluster and standby cluster. The primary cluster is _active_, serving application traffic. The standby cluster is _passive_, accepting updates from the primary cluster. The replication stream will send changes from the primary to the standby. +{{site.data.alerts.callout_info}} +Physical cluster replication is supported in CockroachDB {{ site.data.products.core }} clusters and is in [limited access]({% link {{ page.version.version }}/cockroachdb-feature-availability.md %}) on [Cockroach Cloud]({% link cockroachcloud/physical-cluster-replication.md %}). +{{site.data.alerts.end}} + +In this tutorial, you will set up [**physical cluster replication (PCR)**]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) between a primary cluster and standby cluster. The primary cluster is _active_, serving application traffic. The standby cluster is _passive_, continuously receiving updates from the primary cluster. The replication stream replicates changes from the primary to the standby. The unit of replication is a [virtual cluster]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}), which is part of the underlying infrastructure in the primary and standby clusters. 
@@ -31,11 +35,11 @@ To set up PCR from an existing CockroachDB cluster, which will serve as the prim ## Before you begin -- Two separate CockroachDB clusters (primary and standby) with a minimum of three nodes each, and each using the same CockroachDB {{page.version.version}} version. The standby cluster should be the same version or one version ahead of the primary cluster. The primary and standby clusters must be configured with similar hardware profiles, number of nodes, and overall size. Significant discrepancies in the cluster configurations may result in degraded performance. +- You need two separate CockroachDB clusters (primary and standby), each with a minimum of three nodes. The standby cluster should be the same version or one version ahead of the primary cluster. The primary and standby clusters must be configured with similar hardware profiles, number of nodes, and overall size. Significant discrepancies in the cluster configurations may result in degraded performance. - To set up each cluster, you can follow [Deploy CockroachDB on Premises]({% link {{ page.version.version }}/deploy-cockroachdb-on-premises.md %}). When you initialize the cluster with the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command, you **must** pass the `--virtualized` or `--virtualized-empty` flag. Refer to the cluster creation steps for the [primary cluster](#initialize-the-primary-cluster) and for the [standby cluster](#initialize-the-standby-cluster) for details. - The [Deploy CockroachDB on Premises]({% link {{ page.version.version }}/deploy-cockroachdb-on-premises.md %}) tutorial creates a self-signed certificate for each {{ site.data.products.core }} cluster. To create certificates signed by an external certificate authority, refer to [Create Security Certificates using OpenSSL]({% link {{ page.version.version }}/create-security-certificates-openssl.md %}). 
-- All nodes in each cluster will need access to the Certificate Authority for the other cluster. Refer to [Manage the cluster certificates](#step-3-manage-the-cluster-certificates). -The primary and standby clusters **must have the same [region topology]({% link {{ page.version.version }}/topology-patterns.md %})**. For example, replicating a multi-region primary cluster to a single-region standby cluster is not supported. Mismatching regions between a multi-region primary and standby cluster is also not supported. +- All nodes in each cluster will need access to the Certificate Authority for the other cluster. Refer to [Manage cluster certificates](#step-3-manage-cluster-certificates-and-generate-connection-strings). +- The primary and standby clusters can have different [region topologies]({% link {{ page.version.version }}/topology-patterns.md %}). However, behavior for features that rely on multi-region primitives, such as `REGIONAL BY ROW` and `REGIONAL BY TABLE`, may be affected. ## Step 1. Create the primary cluster @@ -99,7 +103,7 @@ Connect to your primary cluster's system virtual cluster using [`cockroach sql`] Because this is the primary cluster rather than the standby cluster, the `data_state` of all rows is `ready`, rather than `replicating` or another [status]({% link {{ page.version.version }}/physical-cluster-replication-monitoring.md %}). -### Create a replication user and password +### Create a user with replication privileges The standby cluster connects to the primary cluster's system virtual cluster using an identity with the `REPLICATIONSOURCE` [privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). 
Connect to the primary cluster's system virtual cluster and create a user with a password: @@ -110,6 +114,8 @@ The standby cluster connects to the primary cluster's system virtual cluster usi CREATE USER {your username} WITH PASSWORD '{your password}'; ~~~ + If you need to change the password later, refer to [`ALTER USER`]({% link {{ page.version.version }}/alter-user.md %}). + 1. Grant the [`REPLICATIONSOURCE` privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) to your user: {% include_cached copy-clipboard.html %} @@ -117,8 +123,6 @@ The standby cluster connects to the primary cluster's system virtual cluster usi GRANT SYSTEM REPLICATIONSOURCE TO {your username}; ~~~ -If you need to change the password later, refer to [`ALTER USER`]({% link {{ page.version.version }}/alter-user.md %}). - ### Connect to the primary virtual cluster (optional) 1. If you would like to run a sample workload on the primary's virtual cluster, open a new terminal window and use [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) to run the workload. @@ -219,7 +223,7 @@ Connect to your standby cluster's system virtual cluster using [`cockroach sql`] (1 rows) ~~~ -### Create a user for the standby cluster +### Create a user with replication privileges on the standby cluster Create a user to run the PCR stream and access the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) to observe the job: @@ -239,7 +243,7 @@ Create a user to run the PCR stream and access the [DB Console]({% link {{ page. Open the DB Console in your web browser: `https://{node IP or hostname}:8080/`, where you will be prompted for these credentials. Refer to [Physical Cluster Replication Monitoring]({% link {{ page.version.version }}/physical-cluster-replication-monitoring.md %}) for more detail on tracking relevant metrics for your replication stream. -## Step 3. Manage the cluster certificates +## Step 3. 
Manage cluster certificates and generate connection strings {{site.data.alerts.callout_danger}} It is important to carefully manage the exchange of CA certificates between clusters if you have generated self-signed certificates with `cockroach cert` as part of the [prerequisite deployment tutorial]({% link {{ page.version.version }}/deploy-cockroachdb-on-premises.md %}). @@ -247,17 +251,13 @@ It is important to carefully manage the exchange of CA certificat To create certificates signed by an external certificate authority, refer to [Create Security Certificates using OpenSSL]({% link {{ page.version.version }}/create-security-certificates-openssl.md %}). {{site.data.alerts.end}} -At this point, the primary and standby clusters are both running. The next step allows the standby cluster to connect to the primary cluster and begin ingesting its data. Depending on how you manage certificates, you must ensure that all nodes on the primary and the standby cluster have access to the certificate of the other cluster. - -You can use the `cockroach encode-uri` command to generate a connection string containing a cluster's certificate for any [PCR statements]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}#manage-replication-in-the-sql-shell) that require a connection string. - -For example, in this tutorial you will need a connection string for the primary cluster when you start the replication stream from the standby. +At this point, the primary and standby clusters are both running. The next step creates a connection URI with the certificates needed to connect the two clusters. In most cases, we recommend ensuring that all nodes on the primary cluster have access to the certificate of the standby cluster, and vice versa. This ensures that PCR is able to parallelize the work. 
-To generate a connection string, pass the replication user, IP and port, along with the directory to the certificate for the primary cluster: +Use the `cockroach encode-uri` command to generate a connection string containing a cluster's certificate for any [PCR statements]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}#manage-replication-in-the-sql-shell) that require a connection string. Pass the replication user, IP and port, along with the path to the certificate for the **primary cluster**, into the `encode-uri` command: {% include_cached copy-clipboard.html %} ~~~ shell -cockroach encode-uri {replication user}:{password}@{node IP or hostname}:26257 --ca-cert certs/ca.crt --inline +cockroach encode-uri {replication user}:{password}@{node IP or hostname}:26257 --ca-cert {path to certs directory}/ca.crt --inline ~~~ The connection string output contains the primary cluster's certificate: @@ -271,11 +271,11 @@ Copy the output ready for [Step 4](#step-4-start-replication), which requires th ## Step 4. Start replication -The system virtual cluster in the standby cluster initiates and controls the replication stream by pulling from the primary cluster. In this section, you will connect to the primary from the standby to initiate the replication stream. +The system virtual cluster in the standby cluster initializes and controls the replication stream by pulling from the primary cluster. In this section, you will connect to the primary from the standby to initiate the replication stream. 1. 
From the **standby** cluster, use your connection string to the primary: - If you generated the connection string using [`cockroach encode-uri`](#step-3-manage-the-cluster-certificates): + If you generated the connection string using [`cockroach encode-uri`](#step-3-manage-cluster-certificates-and-generate-connection-strings): {% include_cached copy-clipboard.html %} ~~~ sql @@ -285,7 +285,7 @@ The system virtual cluster in the standby cluster initiates and controls the rep ~~~ Otherwise, pass the connection string that contains: - - The replication user and password that you [created for the primary cluster](#create-a-replication-user-and-password). + - The replication user and password that you [created for the primary cluster](#create-a-user-with-replication-privileges). - The node IP address or hostname of one node from the primary cluster. - The path to the primary node's certificate on the standby cluster. @@ -350,12 +350,12 @@ The system virtual cluster in the standby cluster initiates and controls the rep ## Set up PCR from an existing cluster -You can replicate data from an existing CockroachDB cluster that does not have [cluster virtualization]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}) enabled to a standby cluster with cluster virtualization enabled. In the [PCR setup]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}), the existing cluster is the primary cluster, which serves application traffic. +You can set up PCR replication from an existing CockroachDB cluster that does not have [cluster virtualization]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}) enabled. However, the standby cluster must have cluster virtualization enabled. In the [PCR setup]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}), the existing cluster is the primary cluster. 
{{site.data.alerts.callout_info}} When you start PCR with an existing primary cluster that does **not** have [cluster virtualization]({% link {{ page.version.version }}/cluster-virtualization-overview.md %}) enabled, you will not be able to [_fail back_]({% link {{ page.version.version }}/failover-replication.md %}#failback) to the original primary cluster from the promoted, original standby. -For more details on the failback process when you have started PCR with a non-virtualized primary, refer to [Fail back after PCR from an existing cluster]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-after-pcr-from-an-existing-cluster). +For more details on the failback process when you have started PCR with a non-virtualized primary, refer to [Fail back after replicating from an existing cluster]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-after-replicating-from-an-existing-primary-cluster). {{site.data.alerts.end}} Before you begin, you will need: @@ -396,7 +396,7 @@ Before you begin, you will need: (1 row) ~~~ -1. To create the replication job, you will need a connection string for the **primary cluster** containing its CA certificate. For steps to generate a connection string with `cockroach encode-uri`, refer to [Step 3. Manage the cluster certificates](#step-3-manage-the-cluster-certificates). +1. To create the replication job, you will need a connection string for the **primary cluster** containing its CA certificate. For steps to generate a connection string with `cockroach encode-uri`, refer to [Step 3. Manage cluster certificates and generate connection strings](#step-3-manage-cluster-certificates-and-generate-connection-strings). 1. If you would like to run a test workload on your existing **primary cluster**, you can use [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) like the following: @@ -441,7 +441,7 @@ At this point, your replication stream will be running. 
To _fail over_ to the standby cluster, follow the instructions on the [Fail Over from a Primary Cluster to a Standby Cluster]({% link {{ page.version.version }}/failover-replication.md %}) page. -For details on how to _fail back_ after replicating a non-virtualized cluster, refer to [Fail back after PCR from an existing cluster]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-after-pcr-from-an-existing-cluster). +For details on how to _fail back_ after replicating a non-virtualized cluster, refer to [Fail back after replicating from an existing cluster]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-after-replicating-from-an-existing-primary-cluster). ## Connection reference @@ -460,7 +460,7 @@ Cluster | Virtual Cluster | Usage | URL and Parameters Primary | System | Set up a replication user and view running virtual clusters. Connect with [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}). | `"postgresql://root@{node IP or hostname}:{26257}?options=-ccluster=system&sslmode=verify-full"`Use the `--certs-dir` flag to specify the path to your certificate. Primary | Main | Add and run a workload with [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}). | `"postgresql://root@{node IP or hostname}:{26257}?options=-ccluster=main&sslmode=verify-full&sslrootcert=certs/ca.crt&sslcert=certs/client.root.crt&sslkey=certs/client.root.key"`

{% include {{ page.version.version }}/connect/cockroach-workload-parameters.md %} As a result, for the example in this tutorial, you will need: Standby | System | Manage the replication stream. Connect with [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}). | `"postgresql://root@{node IP or hostname}:{26257}?options=-ccluster=system&sslmode=verify-full"`Use the `--certs-dir` flag to specify the path to your certificate. -Standby/Primary | System | Connect to the other cluster. | `"postgresql://{replication user}:{password}@{node IP or hostname}:{26257}/defaultdb?options=-ccluster%3Dsystem&sslinline=true&sslmode=verify-full&sslrootcert=-----BEGIN+CERTIFICATE-----{encoded_cert}-----END+CERTIFICATE-----%0A"`

Generate the connection string with [`cockroach encode-uri`](#step-3-manage-the-cluster-certificates). Use the generated connection string in: +Standby/Primary | System | Connect to the other cluster. | `"postgresql://{replication user}:{password}@{node IP or hostname}:{26257}/defaultdb?options=-ccluster%3Dsystem&sslinline=true&sslmode=verify-full&sslrootcert=-----BEGIN+CERTIFICATE-----{encoded_cert}-----END+CERTIFICATE-----%0A"`

Generate the connection string with [`cockroach encode-uri`](#step-3-manage-cluster-certificates-and-generate-connection-strings). Use the generated connection string in: Standby | Read only | Run read queries on the standby's replicating virtual cluster | `"postgresql://root@{node IP or hostname}:{26257}?options=-ccluster=main-readonly&sslmode=verify-full"`Use the `--certs-dir` flag to specify the path to your certificate. ## What's next From 9052c61dded89aa829446472e06470d6468d9b05 Mon Sep 17 00:00:00 2001 From: Peach Leach Date: Thu, 18 Sep 2025 16:09:15 -0400 Subject: [PATCH 2/2] Fixed missing period --- src/current/v25.3/failover-replication.md | 2 +- src/current/v25.4/failover-replication.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/current/v25.3/failover-replication.md b/src/current/v25.3/failover-replication.md index dfe3edaf194..f13701393d7 100644 --- a/src/current/v25.3/failover-replication.md +++ b/src/current/v25.3/failover-replication.md @@ -40,7 +40,7 @@ To initiate a failover to the standby cluster, specify the point in time for its - [Point-in-time](#fail-over-to-a-point-in-time): - Past: A past timestamp within the [failover window]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) of up to 4 hours in the past. {{site.data.alerts.callout_success}} - Failing over to a past point in time is useful if you need to recover from a recent human error + Failing over to a past point in time is useful if you need to recover from a recent human error. {{site.data.alerts.end}} - Future: A future timestamp for planning a failover. 
diff --git a/src/current/v25.4/failover-replication.md b/src/current/v25.4/failover-replication.md index dfe3edaf194..f13701393d7 100644 --- a/src/current/v25.4/failover-replication.md +++ b/src/current/v25.4/failover-replication.md @@ -40,7 +40,7 @@ To initiate a failover to the standby cluster, specify the point in time for its - [Point-in-time](#fail-over-to-a-point-in-time): - Past: A past timestamp within the [failover window]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) of up to 4 hours in the past. {{site.data.alerts.callout_success}} - Failing over to a past point in time is useful if you need to recover from a recent human error + Failing over to a past point in time is useful if you need to recover from a recent human error. {{site.data.alerts.end}} - Future: A future timestamp for planning a failover.