diff --git a/src/current/_data/redirects.yml b/src/current/_data/redirects.yml index 5df4a678b5c..e210ae50b86 100644 --- a/src/current/_data/redirects.yml +++ b/src/current/_data/redirects.yml @@ -79,12 +79,6 @@ # Pages undergoing maintenance -- destination: migration-overview.md - sources: - - migrate-from-oracle.md - temporary: true - versions: ['v21.2', 'v22.1', 'v22.2', 'v23.1'] - # Renamed pages - destination: admin-ui-overview.md diff --git a/src/current/_includes/v23.1/sidebar-data/migrate.json b/src/current/_includes/v23.1/sidebar-data/migrate.json index 949b2754d81..92a72d90371 100644 --- a/src/current/_includes/v23.1/sidebar-data/migrate.json +++ b/src/current/_includes/v23.1/sidebar-data/migrate.json @@ -9,7 +9,7 @@ ] }, { - "title": "Migration Tools", + "title": "MOLT Tools", "items": [ { "title": "Schema Conversion Tool", @@ -18,7 +18,7 @@ ] }, { - "title": "MOLT Verify", + "title": "Verify", "urls": [ "/${VERSION}/molt-verify.html" ] @@ -28,7 +28,12 @@ "urls": [ "/${VERSION}/live-migration-service.html" ] - }, + } + ] + }, + { + "title": "Third-Party Migration Tools", + "items": [ { "title": "AWS DMS", "urls": [ @@ -120,6 +125,12 @@ "/${VERSION}/migrate-from-mysql.html" ] }, + { + "title": "Migrate from Oracle", + "urls": [ + "/${VERSION}/migrate-from-oracle.html" + ] + }, { "title": "Migration Strategy: Lift and Shift", "urls": [ diff --git a/src/current/_includes/v23.2/sidebar-data/migrate.json b/src/current/_includes/v23.2/sidebar-data/migrate.json index 9b87e0c2f31..5b6338a628b 100644 --- a/src/current/_includes/v23.2/sidebar-data/migrate.json +++ b/src/current/_includes/v23.2/sidebar-data/migrate.json @@ -9,7 +9,7 @@ ] }, { - "title": "Migration Tools", + "title": "MOLT Tools", "items": [ { "title": "Schema Conversion Tool", @@ -18,13 +18,13 @@ ] }, { - "title": "MOLT Fetch", + "title": "Fetch", "urls": [ "/${VERSION}/molt-fetch.html" ] }, { - "title": "MOLT Verify", + "title": "Verify", "urls": [ "/${VERSION}/molt-verify.html" ] @@ -34,7 +34,12 
@@ "urls": [ "/${VERSION}/live-migration-service.html" ] - }, + } + ] + }, + { + "title": "Third-Party Migration Tools", + "items": [ { "title": "AWS DMS", "urls": [ @@ -126,6 +131,12 @@ "/${VERSION}/migrate-from-mysql.html" ] }, + { + "title": "Migrate from Oracle", + "urls": [ + "/${VERSION}/migrate-from-oracle.html" + ] + }, { "title": "Migration Strategy: Lift and Shift", "urls": [ diff --git a/src/current/_includes/v24.1/sidebar-data/migrate.json b/src/current/_includes/v24.1/sidebar-data/migrate.json index 9b87e0c2f31..5b6338a628b 100644 --- a/src/current/_includes/v24.1/sidebar-data/migrate.json +++ b/src/current/_includes/v24.1/sidebar-data/migrate.json @@ -9,7 +9,7 @@ ] }, { - "title": "Migration Tools", + "title": "MOLT Tools", "items": [ { "title": "Schema Conversion Tool", @@ -18,13 +18,13 @@ ] }, { - "title": "MOLT Fetch", + "title": "Fetch", "urls": [ "/${VERSION}/molt-fetch.html" ] }, { - "title": "MOLT Verify", + "title": "Verify", "urls": [ "/${VERSION}/molt-verify.html" ] @@ -34,7 +34,12 @@ "urls": [ "/${VERSION}/live-migration-service.html" ] - }, + } + ] + }, + { + "title": "Third-Party Migration Tools", + "items": [ { "title": "AWS DMS", "urls": [ @@ -126,6 +131,12 @@ "/${VERSION}/migrate-from-mysql.html" ] }, + { + "title": "Migrate from Oracle", + "urls": [ + "/${VERSION}/migrate-from-oracle.html" + ] + }, { "title": "Migration Strategy: Lift and Shift", "urls": [ diff --git a/src/current/cockroachcloud/migrations-page.md b/src/current/cockroachcloud/migrations-page.md index 2655fb9da3d..af9f7bcae63 100644 --- a/src/current/cockroachcloud/migrations-page.md +++ b/src/current/cockroachcloud/migrations-page.md @@ -1,6 +1,6 @@ --- -title: Use the Schema Conversion Tool -summary: Use the Schema Conversion Tool to begin a database migration to CockroachDB. +title: Use the MOLT Schema Conversion Tool +summary: Use the MOLT Schema Conversion Tool to begin a database migration to CockroachDB. 
toc: true cloud: true docs_area: migrate @@ -8,11 +8,11 @@ docs_area: migrate {% capture version_prefix %}{{site.current_cloud_version}}/{% endcapture %} -The **Migrations** page on the CockroachDB {{ site.data.products.cloud }} Console features a **Schema Conversion Tool** that helps you: +The **Migrations** page on the CockroachDB {{ site.data.products.cloud }} Console features the MOLT Schema Conversion Tool. This tool helps you: - Convert a schema from a PostgreSQL, MySQL, Oracle, or Microsoft SQL Server database for use with CockroachDB. -- [Export the converted schema.](#export-the-schema) {% include cockroachcloud/migration/sct-self-hosted.md %} - Migrate directly to a CockroachDB {{ site.data.products.cloud }} database that uses the converted schema. You specify the target database and database owner when [migrating the schema](#migrate-the-schema). +- [Export the converted schema.](#export-the-schema) {% include cockroachcloud/migration/sct-self-hosted.md %} {{site.data.alerts.callout_info}} The **Migrations** page is used to convert a schema for use with CockroachDB and to create a new database that uses the schema. It does not include moving data to the new database. For details on all steps required to complete a database migration, see the [Migration Overview]({% link {{version_prefix}}migration-overview.md %}). @@ -102,10 +102,6 @@ The dump file must be smaller than 4 MB. `INSERT` and `COPY` statements will be
### Use Credentials -{{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} -{{site.data.alerts.end}} - The Schema Conversion Tool can connect directly to a PostgreSQL or MySQL database to obtain the schema. To add a schema using credentials: 1. In step 2 of the **Add SQL Schema** dialog, click **Use Credential**. Select the credentials to use. If the list is empty, this is because no credentials have been created for the selected database type. You can [add credentials](#add-database-credentials) directly from the pulldown menu. @@ -125,7 +121,7 @@ Credentials can be added for PostgreSQL and MySQL databases. 1. Provide the following information: - A **Credential Name** to associate with the credentials. - The **Dialect** of the database you are connecting to. Currently, PostgreSQL and MySQL are supported. - - The **Host** for accessing the database. For example, `migrations.cockroachlabs.com`. Local hosts such as `localhost` and `127.0.0.1` are not allowed. + - The **Host** (i.e., hostname or IP address) for accessing the database. Exclude the protocol (e.g., `tcp://`). For example, `migrations.cockroachlabs.com`. Local hosts such as `localhost` and `127.0.0.1` are not allowed. - The **Port** for accessing the database. - A valid **Username** and **Password** for accessing the database. - The **Database Name** to access. The Schema Conversion Tool will obtain the schema for this database. diff --git a/src/current/v23.1/live-migration-service.md b/src/current/v23.1/live-migration-service.md index 15f1ec55e75..dec3c51062d 100644 --- a/src/current/v23.1/live-migration-service.md +++ b/src/current/v23.1/live-migration-service.md @@ -9,7 +9,7 @@ docs_area: migrate {% include feature-phases/preview.md %} {{site.data.alerts.end}} -MOLT LMS (Live Migration Service) is used to perform a [live migration]({% link {{ page.version.version }}/migration-overview.md %}#minimal-downtime) to CockroachDB. 
+MOLT LMS (Live Migration Service) is used during a [live migration]({% link {{ page.version.version }}/migration-overview.md %}#minimal-downtime) to CockroachDB. The LMS is a self-hosted, horizontally scalable proxy that routes traffic between an application, a source database, and a target CockroachDB database. You use the LMS to control which database, as the "source of truth", is serving reads and writes to an application. You can optionally configure the LMS to [shadow production traffic](#shadowing-modes) from the source database and validate the query results on CockroachDB. When you have sufficiently tested your application and are confident with its consistency and performance on CockroachDB, you use the LMS to [perform the cutover](#perform-a-cutover) to CockroachDB. diff --git a/src/current/v23.1/migrate-from-oracle.md b/src/current/v23.1/migrate-from-oracle.md index 7606c9d9521..0709925396c 100644 --- a/src/current/v23.1/migrate-from-oracle.md +++ b/src/current/v23.1/migrate-from-oracle.md @@ -6,10 +6,10 @@ docs_area: migrate --- {{site.data.alerts.callout_danger}} -The instructions on this page require updates. We currently recommend [using AWS Database Migration Service (DMS) to migrate data]({% link {{ page.version.version }}/aws-dms.md %}) from Oracle to CockroachDB. You can also [migrate from CSV]({% link {{ page.version.version }}/migrate-from-csv.md %}). -{{site.data.alerts.end}} +The instructions on this page are outdated. Use the [Schema Conversion Tool]({% link cockroachcloud/migrations-page.md %}?filters=oracle) to convert an Oracle schema into a compatible CockroachDB schema, and a tool such as [AWS Database Migration Service (DMS)]({% link {{ page.version.version }}/aws-dms.md %}) or [Qlik]({% link {{ page.version.version }}/qlik.md %}) to migrate data from Oracle to CockroachDB. -This page has instructions for migrating data from Oracle into CockroachDB by [importing]({% link {{ page.version.version }}/import.md %}) CSV files. 
Note that `IMPORT` only works for creating new tables. For information on how to add CSV data to existing tables, see [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}). +`IMPORT` is deprecated. To move data into CockroachDB, use [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) or [`COPY FROM`]({% link {{ page.version.version }}/copy-from.md %}). +{{site.data.alerts.end}} To illustrate this process, we use the following sample data and tools: diff --git a/src/current/v23.2/live-migration-service.md b/src/current/v23.2/live-migration-service.md index e106034bd30..7f619fd3da0 100644 --- a/src/current/v23.2/live-migration-service.md +++ b/src/current/v23.2/live-migration-service.md @@ -9,7 +9,7 @@ docs_area: migrate {% include feature-phases/preview.md %} {{site.data.alerts.end}} -MOLT LMS (Live Migration Service) is used to perform a [live migration]({% link {{ page.version.version }}/migration-overview.md %}#minimal-downtime) to CockroachDB. +MOLT LMS (Live Migration Service) is used during a [live migration]({% link {{ page.version.version }}/migration-overview.md %}#minimal-downtime) to CockroachDB. The LMS is a self-hosted, horizontally scalable proxy that routes traffic between an application, a source database, and a target CockroachDB database. You use the LMS to control which database, as the "source of truth", is serving reads and writes to an application. You can optionally configure the LMS to [shadow production traffic](#shadowing-modes) from the source database and validate the query results on CockroachDB. When you have sufficiently tested your application and are confident with its consistency and performance on CockroachDB, you use the LMS to [perform the cutover](#perform-a-cutover) to CockroachDB. 
diff --git a/src/current/v23.2/migrate-from-oracle.md b/src/current/v23.2/migrate-from-oracle.md index 7606c9d9521..ed8e3f901d9 100644 --- a/src/current/v23.2/migrate-from-oracle.md +++ b/src/current/v23.2/migrate-from-oracle.md @@ -6,10 +6,10 @@ docs_area: migrate --- {{site.data.alerts.callout_danger}} -The instructions on this page require updates. We currently recommend [using AWS Database Migration Service (DMS) to migrate data]({% link {{ page.version.version }}/aws-dms.md %}) from Oracle to CockroachDB. You can also [migrate from CSV]({% link {{ page.version.version }}/migrate-from-csv.md %}). -{{site.data.alerts.end}} +The instructions on this page are outdated. Use the [Schema Conversion Tool]({% link cockroachcloud/migrations-page.md %}?filters=oracle) to convert an Oracle schema into a compatible CockroachDB schema, and a tool such as [AWS Database Migration Service (DMS)]({% link {{ page.version.version }}/aws-dms.md %}) or [Qlik]({% link {{ page.version.version }}/qlik.md %}) to migrate data from Oracle to CockroachDB. -This page has instructions for migrating data from Oracle into CockroachDB by [importing]({% link {{ page.version.version }}/import.md %}) CSV files. Note that `IMPORT` only works for creating new tables. For information on how to add CSV data to existing tables, see [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}). +Note that `IMPORT` is deprecated. To move data into CockroachDB, use [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) or [`COPY FROM`]({% link {{ page.version.version }}/copy-from.md %}). +{{site.data.alerts.end}} To illustrate this process, we use the following sample data and tools: diff --git a/src/current/v23.2/migration-overview.md b/src/current/v23.2/migration-overview.md index 3912952a489..3e1372af46a 100644 --- a/src/current/v23.2/migration-overview.md +++ b/src/current/v23.2/migration-overview.md @@ -62,17 +62,17 @@ A lift-and-shift approach is the most straightforward. 
However, it's important t - *Reduced functionality* takes some, but not all, application functionality offline. For example, you can disable writes but not reads while you migrate the application data, and queue data to be written after completing the migration. -For an overview of lift-and-shift migrations to CockroachDB, see [Lift and Shift](#lift-and-shift). For considerations and details about the pros and cons of this approach, see [Migration Strategy: Lift and Shift]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}). +For an overview of lift-and-shift migrations to CockroachDB, see [Lift and Shift](#lift-and-shift). #### Minimal downtime -If your application cannot tolerate downtime, then you should aim for a "zero-downtime" approach. "Zero" means that downtime is reduced to either an absolute minimum or zero, such that users do not notice the migration. +If your application cannot tolerate downtime, then you should aim for a "zero-downtime" approach. This reduces downtime to an absolute minimum, such that users do not notice the migration. The minimum possible downtime depends on whether you can tolerate inconsistency in the migrated data: -- *Consistent* migrations reduce downtime to an absolute minimum (i.e., from 30 seconds to sub-seconds) while keeping data synchronized between the source database and CockroachDB. **Consistency requires downtime.** In this approach, downtime occurs right before [cutover](#cutover-strategy), as you drain the remaining transactions from the source database to CockroachDB. +- Migrations performed using *consistent cutover* reduce downtime to an absolute minimum (i.e., seconds or sub-seconds) while keeping data synchronized between the source database and CockroachDB. **Consistency requires downtime.** In this approach, downtime occurs right before [cutover](#cutover-strategy), as you drain the remaining transactions from the source database to CockroachDB. 
-- *Inconsistent* migrations can reduce downtime to zero. These require the most preparation, and typically allow read/write traffic to both databases for at least a small amount of time, thereby sacrificing consistency for availability. {% comment %}You can use the CockroachDB Live Migration Service (MOLT LMS) to run application queries simultaneously on your source database and CockroachDB.{% endcomment %} Without stopping application traffic, you perform an immediate [cutover](#cutover-strategy), while assuming that some writes will not be replicated to CockroachDB. You may want to manually reconcile these data inconsistencies after switching over. +- Migrations performed using *immediate cutover* can reduce downtime to zero. These require the most preparation, and typically allow read/write traffic to both databases for at least a short period of time, sacrificing consistency for availability. {% comment %}You can use the CockroachDB Live Migration Service (MOLT LMS) to run application queries simultaneously on your source database and CockroachDB.{% endcomment %} Without stopping application traffic, you perform an **immediate** [cutover](#cutover-strategy), while assuming that some writes will not be replicated to CockroachDB. You may want to manually reconcile these data inconsistencies after switching over. For an overview of zero-downtime migrations to CockroachDB, see [Zero Downtime](#zero-downtime). {% comment %}For details, see [Migration Strategy: Zero Downtime](migration-strategy-zero-downtime).{% endcomment %} @@ -245,9 +245,11 @@ Then import the converted schema to a CockroachDB cluster: Before moving data, Cockroach Labs recommends [dropping any indexes]({% link {{ page.version.version }}/drop-index.md %}) on the CockroachDB database. The indexes can be [recreated]({% link {{ page.version.version }}/create-index.md %}) after the data is loaded. Doing so will optimize performance. 
{{site.data.alerts.end}} -After [converting the schema](#convert-the-schema), load your data into CockroachDB so that you can [test your application queries](#validate-queries). Then use one of the following methods to migrate the data (you may need to use additional tooling to extract and/or convert the data to an appropriate file format): +After [converting the schema](#convert-the-schema), load your data into CockroachDB so that you can [test your application queries](#validate-queries). Then use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. -- {% include {{ page.version.version }}/migration/load-data-import-into.md %} Typically, initial data loading during a database migration will not be running concurrently with application traffic, so the fact that `IMPORT INTO` takes the table offline may not have any observable availability impact. +Alternatively, you can use one of the following methods to migrate the data. Additional tooling may be required to extract or convert the data to a supported file format. + +- {% include {{ page.version.version }}/migration/load-data-import-into.md %} Typically during a migration, data is initially loaded before foreground application traffic begins to be served, so the impact of taking the table offline when running `IMPORT INTO` may be minimal. - {% include {{ page.version.version }}/migration/load-data-third-party.md %} Within the tool, you can select the database tables to migrate to the test cluster. - {% include {{ page.version.version }}/migration/load-data-copy-from.md %} @@ -259,9 +261,9 @@ Note that CockroachDB defaults to the [`SERIALIZABLE`]({% link {{ page.version.v ##### Shadowing -You can "shadow" your production workload by executing your source SQL statements on CockroachDB in parallel. The [CockroachDB Live Migration Service (LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) can perform shadowing. 
You can then [test the queries](#test-query-results-and-performance) on CockroachDB for consistency, performance, and potential issues with the migration. +You can "shadow" your production workload by executing your source SQL statements on CockroachDB in parallel. You can then [validate the queries](#test-query-results-and-performance) on CockroachDB for consistency, performance, and potential issues with the migration. -Shadowing may not be necessary or practical for your workload. For example, because transactions are serialized on CockroachDB, this will limit your ability to validate the performance of high-throughput workloads. +The [CockroachDB Live Migration Service (MOLT LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) can [perform shadowing]({% link {{ page.version.version }}/live-migration-service.md %}#shadowing-modes). This is intended only for [testing](#test-query-results-and-performance) or [performing a dry run](#perform-a-dry-run). Shadowing should **not** be used in production when performing a [live migration](#zero-downtime). ##### Test query results and performance @@ -310,35 +312,32 @@ Using this method, consistency is achieved by only performing the cutover once a The following is a high-level overview of the migration steps. For considerations and details about the pros and cons of this approach, see [Migration Strategy: Lift and Shift]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}). 1. Stop application traffic to your source database. **This begins downtime.** -1. Move data in one of the following ways: - - {% include {{ page.version.version }}/migration/load-data-import-into.md %} - - {% include {{ page.version.version }}/migration/load-data-third-party.md %} - - {% include {{ page.version.version }}/migration/load-data-copy-from.md %} -1. 
After the data is migrated, you can use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. +1. Use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. +1. After the data is migrated, use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. 1. Perform a [cutover](#cutover-strategy) by resuming application traffic, now to CockroachDB. {% comment %}1. If you want the ability to [roll back](#all-at-once-rollback) the migration, replicate data back to the source database.{% endcomment %} ### Zero Downtime -Using this method, downtime is minimized by performing the cutover while writes are still being replicated from the source database to CockroachDB. Inconsistencies are resolved through manual reconciliation. - -The following is a high-level overview of the migration steps. {% comment %}For details on this migration strategy, see [Migration Strategy: Zero Downtime]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}).{% endcomment %} +During a "live migration", downtime is minimized by performing the cutover while writes are still being replicated from the source database to CockroachDB. Inconsistencies are resolved through manual reconciliation. -{% comment %}You can use the CockroachDB Live Migration Service (MOLT LMS) to run application queries simultaneously on your source database and CockroachDB.{% endcomment %} +The following is a high-level overview of the migration steps. The two approaches are mutually exclusive, and each has [tradeoffs](#minimal-downtime). 
{% comment %}For details on this migration strategy, see [Migration Strategy: Zero Downtime]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}).{% endcomment %} To prioritize consistency and minimize downtime: -1. {% include {{ page.version.version }}/migration/load-data-third-party.md %} Select the tool's option to **replicate ongoing changes** after performing the initial load of data into CockroachDB. -1. As the data is migrating, you can use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. -1. Once nearly all data from your source database has been moved to CockroachDB (for example, with a <1 second delay or <1000 rows), stop application traffic to your source database. **This begins downtime.** -1. Wait for replication to CockroachDB to complete. -1. Perform a [cutover](#cutover-strategy) by resuming application traffic, now to CockroachDB. +1. Set up the [CockroachDB Live Migration Service (MOLT LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) to proxy for application traffic between your source database and CockroachDB. Do **not** shadow the application traffic. +1. Use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. Use the tool to [**replicate ongoing changes**]({% link {{ page.version.version }}/molt-fetch.md %}#replication) after it performs the initial load of data into CockroachDB. +1. As the data is migrating, use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. +1. 
After nearly all data from your source database has been moved to CockroachDB (for example, with a <1-second delay or <1000 rows), use MOLT LMS to begin a [*consistent cutover*]({% link {{ page.version.version }}/live-migration-service.md %}#consistent-cutover) and stop application traffic to your source database. **This begins downtime.** +1. Wait for MOLT Fetch to finish replicating changes to CockroachDB. +1. Use MOLT LMS to commit the [consistent cutover]({% link {{ page.version.version }}/live-migration-service.md %}#consistent-cutover). This resumes application traffic, now to CockroachDB. To achieve zero downtime with inconsistency: -1. {% include {{ page.version.version }}/migration/load-data-third-party.md %} Select the tool's option to replicate ongoing changes after performing the initial load of data into CockroachDB. +1. Set up the [CockroachDB Live Migration Service (MOLT LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) to proxy for application traffic between your source database and CockroachDB. Use a [shadowing mode]({% link {{ page.version.version }}/live-migration-service.md %}#shadowing-modes) to run application queries simultaneously on your source database and CockroachDB. +1. Use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. Use the tool to **replicate ongoing changes** after performing the initial load of data into CockroachDB. 1. As the data is migrating, you can use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. -1. Once nearly all data from your source database has been moved to CockroachDB (for example, with a <1 second delay or <1000 rows), perform a [cutover](#cutover-strategy) by pointing application traffic to CockroachDB. +1. 
After nearly all data from your source database has been moved to CockroachDB (for example, with a <1 second delay or <1000 rows), perform an [*immediate cutover*](#cutover-strategy) by pointing application traffic to CockroachDB. 1. Manually reconcile any inconsistencies caused by writes that were not replicated during the cutover. 1. Close the connection to the source database when you are ready to finish the migration. diff --git a/src/current/v24.1/live-migration-service.md b/src/current/v24.1/live-migration-service.md index c3b22310421..46befbfc939 100644 --- a/src/current/v24.1/live-migration-service.md +++ b/src/current/v24.1/live-migration-service.md @@ -9,7 +9,7 @@ docs_area: migrate {% include feature-phases/preview.md %} {{site.data.alerts.end}} -MOLT LMS (Live Migration Service) is used to perform a [live migration]({% link {{ page.version.version }}/migration-overview.md %}#minimal-downtime) to CockroachDB. +MOLT LMS (Live Migration Service) is used during a [live migration]({% link {{ page.version.version }}/migration-overview.md %}#minimal-downtime) to CockroachDB. The LMS is a self-hosted, horizontally scalable proxy that routes traffic between an application, a source database, and a target CockroachDB database. You use the LMS to control which database, as the "source of truth", is serving reads and writes to an application. You can optionally configure the LMS to [shadow production traffic](#shadowing-modes) from the source database and validate the query results on CockroachDB. When you have sufficiently tested your application and are confident with its consistency and performance on CockroachDB, you use the LMS to [perform the cutover](#perform-a-cutover) to CockroachDB. 
diff --git a/src/current/v24.1/migrate-from-mysql.md b/src/current/v24.1/migrate-from-mysql.md index 18150b7df1e..71485a01e5b 100644 --- a/src/current/v24.1/migrate-from-mysql.md +++ b/src/current/v24.1/migrate-from-mysql.md @@ -158,7 +158,7 @@ Use the [Schema Conversion Tool](https://www.cockroachlabs.com/docs/cockroachclo Click **Save**. - This is a workaround to prevent [data validation](#step-3-validate-the-migrated-data) from failing due to collation mismatches. For more details, see the [MOLT Verify] ({% link {{ page.version.version }}/molt-verify.md %}#limitations) documentation. + This is a workaround to prevent [data validation](#step-3-validate-the-migrated-data) from failing due to collation mismatches. For more details, see the [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}#known-limitations) documentation. 1. Click [**Migrate Schema**](https://www.cockroachlabs.com/docs/cockroachcloud/migrations-page?filters=mysql#migrate-the-schema) to create a new {{ site.data.products.serverless }} cluster with the converted schema. Name the database `world`. diff --git a/src/current/v24.1/migrate-from-oracle.md b/src/current/v24.1/migrate-from-oracle.md index fe0dc71455d..4d47ffebdac 100644 --- a/src/current/v24.1/migrate-from-oracle.md +++ b/src/current/v24.1/migrate-from-oracle.md @@ -6,7 +6,9 @@ docs_area: migrate --- {{site.data.alerts.callout_danger}} -The instructions on this page require updates. We currently recommend [using AWS Database Migration Service (DMS) to migrate data]({% link {{ page.version.version }}/aws-dms.md %}) from Oracle to CockroachDB. You can also [migrate from CSV]({% link {{ page.version.version }}/migrate-from-csv.md %}). +The instructions on this page are outdated. 
Use the [Schema Conversion Tool]({% link cockroachcloud/migrations-page.md %}?filters=oracle) to convert an Oracle schema into a compatible CockroachDB schema, and a tool such as [AWS Database Migration Service (DMS)]({% link {{ page.version.version }}/aws-dms.md %}) or [Qlik]({% link {{ page.version.version }}/qlik.md %}) to migrate data from Oracle to CockroachDB. + +Note that `IMPORT` is deprecated. To move data into CockroachDB, use [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) or [`COPY FROM`]({% link {{ page.version.version }}/copy-from.md %}). {{site.data.alerts.end}} This page has instructions for migrating data from Oracle into CockroachDB by [importing]({% link {{ page.version.version }}/import.md %}) CSV files. Note that `IMPORT` only works for creating new tables. For information on how to add CSV data to existing tables, see [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}). diff --git a/src/current/v24.1/migration-overview.md b/src/current/v24.1/migration-overview.md index 3912952a489..3e1372af46a 100644 --- a/src/current/v24.1/migration-overview.md +++ b/src/current/v24.1/migration-overview.md @@ -62,17 +62,17 @@ A lift-and-shift approach is the most straightforward. However, it's important t - *Reduced functionality* takes some, but not all, application functionality offline. For example, you can disable writes but not reads while you migrate the application data, and queue data to be written after completing the migration. -For an overview of lift-and-shift migrations to CockroachDB, see [Lift and Shift](#lift-and-shift). For considerations and details about the pros and cons of this approach, see [Migration Strategy: Lift and Shift]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}). +For an overview of lift-and-shift migrations to CockroachDB, see [Lift and Shift](#lift-and-shift). 
#### Minimal downtime -If your application cannot tolerate downtime, then you should aim for a "zero-downtime" approach. "Zero" means that downtime is reduced to either an absolute minimum or zero, such that users do not notice the migration. +If your application cannot tolerate downtime, then you should aim for a "zero-downtime" approach. This reduces downtime to an absolute minimum, such that users do not notice the migration. The minimum possible downtime depends on whether you can tolerate inconsistency in the migrated data: -- *Consistent* migrations reduce downtime to an absolute minimum (i.e., from 30 seconds to sub-seconds) while keeping data synchronized between the source database and CockroachDB. **Consistency requires downtime.** In this approach, downtime occurs right before [cutover](#cutover-strategy), as you drain the remaining transactions from the source database to CockroachDB. +- Migrations performed using *consistent cutover* reduce downtime to an absolute minimum (i.e., seconds or sub-seconds) while keeping data synchronized between the source database and CockroachDB. **Consistency requires downtime.** In this approach, downtime occurs right before [cutover](#cutover-strategy), as you drain the remaining transactions from the source database to CockroachDB. -- *Inconsistent* migrations can reduce downtime to zero. These require the most preparation, and typically allow read/write traffic to both databases for at least a small amount of time, thereby sacrificing consistency for availability. {% comment %}You can use the CockroachDB Live Migration Service (MOLT LMS) to run application queries simultaneously on your source database and CockroachDB.{% endcomment %} Without stopping application traffic, you perform an immediate [cutover](#cutover-strategy), while assuming that some writes will not be replicated to CockroachDB. You may want to manually reconcile these data inconsistencies after switching over. 
+- Migrations performed using *immediate cutover* can reduce downtime to zero. These require the most preparation, and typically allow read/write traffic to both databases for at least a short period of time, sacrificing consistency for availability. {% comment %}You can use the CockroachDB Live Migration Service (MOLT LMS) to run application queries simultaneously on your source database and CockroachDB.{% endcomment %} Without stopping application traffic, you perform an **immediate** [cutover](#cutover-strategy), while assuming that some writes will not be replicated to CockroachDB. You may want to manually reconcile these data inconsistencies after switching over. For an overview of zero-downtime migrations to CockroachDB, see [Zero Downtime](#zero-downtime). {% comment %}For details, see [Migration Strategy: Zero Downtime](migration-strategy-zero-downtime).{% endcomment %} @@ -245,9 +245,11 @@ Then import the converted schema to a CockroachDB cluster: Before moving data, Cockroach Labs recommends [dropping any indexes]({% link {{ page.version.version }}/drop-index.md %}) on the CockroachDB database. The indexes can be [recreated]({% link {{ page.version.version }}/create-index.md %}) after the data is loaded. Doing so will optimize performance. {{site.data.alerts.end}} -After [converting the schema](#convert-the-schema), load your data into CockroachDB so that you can [test your application queries](#validate-queries). Then use one of the following methods to migrate the data (you may need to use additional tooling to extract and/or convert the data to an appropriate file format): +After [converting the schema](#convert-the-schema), load your data into CockroachDB so that you can [test your application queries](#validate-queries). Then use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. 
-- {% include {{ page.version.version }}/migration/load-data-import-into.md %} Typically, initial data loading during a database migration will not be running concurrently with application traffic, so the fact that `IMPORT INTO` takes the table offline may not have any observable availability impact. +Alternatively, you can use one of the following methods to migrate the data. Additional tooling may be required to extract or convert the data to a supported file format. + +- {% include {{ page.version.version }}/migration/load-data-import-into.md %} Typically during a migration, data is initially loaded before foreground application traffic begins to be served, so the impact of taking the table offline when running `IMPORT INTO` may be minimal. - {% include {{ page.version.version }}/migration/load-data-third-party.md %} Within the tool, you can select the database tables to migrate to the test cluster. - {% include {{ page.version.version }}/migration/load-data-copy-from.md %} @@ -259,9 +261,9 @@ Note that CockroachDB defaults to the [`SERIALIZABLE`]({% link {{ page.version.v ##### Shadowing -You can "shadow" your production workload by executing your source SQL statements on CockroachDB in parallel. The [CockroachDB Live Migration Service (LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) can perform shadowing. You can then [test the queries](#test-query-results-and-performance) on CockroachDB for consistency, performance, and potential issues with the migration. +You can "shadow" your production workload by executing your source SQL statements on CockroachDB in parallel. You can then [validate the queries](#test-query-results-and-performance) on CockroachDB for consistency, performance, and potential issues with the migration. -Shadowing may not be necessary or practical for your workload. For example, because transactions are serialized on CockroachDB, this will limit your ability to validate the performance of high-throughput workloads. 
+The [CockroachDB Live Migration Service (MOLT LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) can [perform shadowing]({% link {{ page.version.version }}/live-migration-service.md %}#shadowing-modes). This is intended only for [testing](#test-query-results-and-performance) or [performing a dry run](#perform-a-dry-run). Shadowing should **not** be used in production when performing a [live migration](#zero-downtime). ##### Test query results and performance @@ -310,35 +312,32 @@ Using this method, consistency is achieved by only performing the cutover once a The following is a high-level overview of the migration steps. For considerations and details about the pros and cons of this approach, see [Migration Strategy: Lift and Shift]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}). 1. Stop application traffic to your source database. **This begins downtime.** -1. Move data in one of the following ways: - - {% include {{ page.version.version }}/migration/load-data-import-into.md %} - - {% include {{ page.version.version }}/migration/load-data-third-party.md %} - - {% include {{ page.version.version }}/migration/load-data-copy-from.md %} -1. After the data is migrated, you can use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. +1. Use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. +1. After the data is migrated, use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. 1. Perform a [cutover](#cutover-strategy) by resuming application traffic, now to CockroachDB. {% comment %}1. 
If you want the ability to [roll back](#all-at-once-rollback) the migration, replicate data back to the source database.{% endcomment %} ### Zero Downtime -Using this method, downtime is minimized by performing the cutover while writes are still being replicated from the source database to CockroachDB. Inconsistencies are resolved through manual reconciliation. - -The following is a high-level overview of the migration steps. {% comment %}For details on this migration strategy, see [Migration Strategy: Zero Downtime]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}).{% endcomment %} +During a "live migration", downtime is minimized by performing the cutover while writes are still being replicated from the source database to CockroachDB. Inconsistencies are resolved through manual reconciliation. -{% comment %}You can use the CockroachDB Live Migration Service (MOLT LMS) to run application queries simultaneously on your source database and CockroachDB.{% endcomment %} +The following is a high-level overview of the migration steps. The two approaches are mutually exclusive, and each has [tradeoffs](#minimal-downtime). {% comment %}For details on this migration strategy, see [Migration Strategy: Zero Downtime]({% link {{ page.version.version }}/migration-strategy-lift-and-shift.md %}).{% endcomment %} To prioritize consistency and minimize downtime: -1. {% include {{ page.version.version }}/migration/load-data-third-party.md %} Select the tool's option to **replicate ongoing changes** after performing the initial load of data into CockroachDB. -1. As the data is migrating, you can use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. -1. Once nearly all data from your source database has been moved to CockroachDB (for example, with a <1 second delay or <1000 rows), stop application traffic to your source database. 
**This begins downtime.** -1. Wait for replication to CockroachDB to complete. -1. Perform a [cutover](#cutover-strategy) by resuming application traffic, now to CockroachDB. +1. Set up the [CockroachDB Live Migration Service (MOLT LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) to proxy for application traffic between your source database and CockroachDB. Do **not** shadow the application traffic. +1. Use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. Use the tool to [**replicate ongoing changes**]({% link {{ page.version.version }}/molt-fetch.md %}#replication) after it performs the initial load of data into CockroachDB. +1. As the data is migrating, use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. +1. After nearly all data from your source database has been moved to CockroachDB (for example, with a <1-second delay or <1000 rows), use MOLT LMS to begin a [*consistent cutover*]({% link {{ page.version.version }}/live-migration-service.md %}#consistent-cutover) and stop application traffic to your source database. **This begins downtime.** +1. Wait for MOLT Fetch to finish replicating changes to CockroachDB. +1. Use MOLT LMS to commit the [consistent cutover]({% link {{ page.version.version }}/live-migration-service.md %}#consistent-cutover). This resumes application traffic, now to CockroachDB. To achieve zero downtime with inconsistency: -1. {% include {{ page.version.version }}/migration/load-data-third-party.md %} Select the tool's option to replicate ongoing changes after performing the initial load of data into CockroachDB. +1. Set up the [CockroachDB Live Migration Service (MOLT LMS)]({% link {{ page.version.version }}/live-migration-service.md %}) to proxy for application traffic between your source database and CockroachDB. 
Use a [shadowing mode]({% link {{ page.version.version }}/live-migration-service.md %}#shadowing-modes) to run application queries simultaneously on your source database and CockroachDB. +1. Use [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %}) to move the source data to CockroachDB. Use the tool to **replicate ongoing changes** after performing the initial load of data into CockroachDB. 1. As the data is migrating, you can use [MOLT Verify]({% link {{ page.version.version }}/molt-verify.md %}) to validate the consistency of the data between the source database and CockroachDB. -1. Once nearly all data from your source database has been moved to CockroachDB (for example, with a <1 second delay or <1000 rows), perform a [cutover](#cutover-strategy) by pointing application traffic to CockroachDB. +1. After nearly all data from your source database has been moved to CockroachDB (for example, with a <1 second delay or <1000 rows), perform an [*immediate cutover*](#cutover-strategy) by pointing application traffic to CockroachDB. 1. Manually reconcile any inconsistencies caused by writes that were not replicated during the cutover. 1. Close the connection to the source database when you are ready to finish the migration. diff --git a/src/current/v24.1/molt-verify.md b/src/current/v24.1/molt-verify.md index a0e31e8bc50..0c45ca3343f 100644 --- a/src/current/v24.1/molt-verify.md +++ b/src/current/v24.1/molt-verify.md @@ -73,7 +73,7 @@ Flag | Description ----------|------------ `--source` | (Required) Connection string for the source database. `--target` | (Required) Connection string for the target database. -`--concurrency` | Number of shards to process at a time.
**Default:** 16
For faster verification, set this flag to a higher value. {% comment %}
Note: Table splitting by shard only works for [`INT`]({% link {{ page.version.version }}/int.md %}), [`UUID`]({% link {{ page.version.version }}/uuid.md %}), and [`FLOAT`]({% link {{ page.version.version }}/float.md %}) data types.{% endcomment %} +`--concurrency` | Number of threads to process at a time when reading the tables.
**Default:** 16
For faster verification, set this flag to a higher value. {% comment %}
Note: Table splitting by shard only works for [`INT`]({% link {{ page.version.version }}/int.md %}), [`UUID`]({% link {{ page.version.version }}/uuid.md %}), and [`FLOAT`]({% link {{ page.version.version }}/float.md %}) data types.{% endcomment %} `--row-batch-size` | Number of rows to get from a table at a time.
**Default:** 20000
`--table-filter` | Verify tables that match a specified [regular expression](https://wikipedia.org/wiki/Regular_expression). `--schema-filter` | Verify schemas that match a specified [regular expression](https://wikipedia.org/wiki/Regular_expression). @@ -117,15 +117,18 @@ When verification completes, the output displays a summary message like the following: - `num_success` is the number of rows that matched. - `num_conditional_success` is the number of rows that matched while having a column mismatch due to a type difference. This value indicates that all other columns that could be compared have matched successfully. You should manually review the warnings and errors in the output to determine whether the column mismatches can be ignored. -## Limitations +## Known limitations -- While verifying data, MOLT Verify pages 20,000 rows at a time by default, and row values can change between batches, which can lead to temporary inconsistencies in data. Enable `--live` mode to have the tool retry verification on these rows. You can also change the row batch size using the `--row_batch_size` [flag](#flags). -- MySQL enums and set types are not supported. +- MOLT Verify compares 20,000 rows at a time by default, and row values can change between batches, potentially resulting in temporary inconsistencies in data. If `--live` mode is enabled, MOLT Verify retries verification on these rows. To configure the row batch size, use the `--row-batch-size` [flag](#flags). - MOLT Verify checks for collation mismatches on [primary key]({% link {{ page.version.version }}/primary-key.md %}) columns. This may cause validation to fail when a [`STRING`]({% link {{ page.version.version }}/string.md %}) is used as a primary key and the source and target databases are using different [collations]({% link {{ page.version.version }}/collate.md %}). -- MOLT Verify only supports comparing one MySQL database to a whole CockroachDB schema (which is assumed to be `public`). 
- MOLT Verify might give an error in case of schema changes on either the source or target database. - [Geospatial types]({% link {{ page.version.version }}/spatial-data-overview.md %}#spatial-objects) cannot yet be compared. +The following limitations are specific to MySQL: + +- MySQL enums and set types are not supported. +- MOLT Verify only supports comparing one MySQL database to a whole CockroachDB schema (which is assumed to be `public`). + ## See also - [MOLT Fetch]({% link {{ page.version.version }}/molt-fetch.md %})