From b040dea67cd8e13daf9d2f15a3562289f98a3901 Mon Sep 17 00:00:00 2001
From: stoobie
Date: Wed, 9 Feb 2022 15:03:39 +0200
Subject: [PATCH] MIG-1017 Initial commit, rough draft.

---
 .../advanced-migration-options-3-4.adoc       |   7 ++
 modules/migration-adding-cluster-to-cam.adoc  |   2 +-
 .../migration-migrating-on-prem-to-cloud.adoc | 118 ++++++++++++++++++
 3 files changed, 126 insertions(+), 1 deletion(-)
 create mode 100644 modules/migration-migrating-on-prem-to-cloud.adoc

diff --git a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc
index 9ac21df97c5f..512ecd9dc2e0 100644
--- a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc
+++ b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc
@@ -11,6 +11,13 @@ You can automate your migrations and modify the `MigPlan` and `MigrationControll
 
 include::modules/migration-terminology.adoc[leveloffset=+1]
 
+include::modules/migration-migrating-on-prem-to-cloud.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+.Additional resources
+* For information about creating a MigCluster CR manifest for each remote cluster, see xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-migrating-applications-api_advanced-migration-options-3-4[Migrating an application by using the {mtc-short} API].
+* For information about adding a cluster by using the web console, see xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migrating-applications-mtc-web-console_migrating-applications-3-4[Migrating your applications by using the {mtc-short} web console].
+
 [id="migrating-applications-cli_{context}"]
 == Migrating applications by using the command line
 
diff --git a/modules/migration-adding-cluster-to-cam.adoc b/modules/migration-adding-cluster-to-cam.adoc
index cfc83eccef98..d91bf3b690ff 100644
--- a/modules/migration-adding-cluster-to-cam.adoc
+++ b/modules/migration-adding-cluster-to-cam.adoc
@@ -15,7 +15,7 @@ You can add a cluster to the {mtc-full} ({mtc-short}) web console.
 ** You must specify the Azure resource group name for the cluster.
 ** The clusters must be in the same Azure resource group.
 ** The clusters must be in the same geographic location.
-* If you are using direct image migration, you must expose a route to
+* If you are using direct image migration, you must expose a route to the image registry of the source cluster.
 
 .Procedure
diff --git a/modules/migration-migrating-on-prem-to-cloud.adoc b/modules/migration-migrating-on-prem-to-cloud.adoc
new file mode 100644
index 000000000000..6b76f8b745f9
--- /dev/null
+++ b/modules/migration-migrating-on-prem-to-cloud.adoc
@@ -0,0 +1,118 @@
+// Module included in the following assemblies:
+//
+// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc
+// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc
+:_content-type: PROCEDURE
+[id="migration-migrating-applications-on-prem-to-cloud_{context}"]
+= Migrating an application from on-premises to a cloud-based cluster
+
+You can migrate from a source cluster that is behind a firewall to a cloud-based destination cluster by establishing a network tunnel between the two clusters. The `crane tunnel-api` command establishes such a tunnel by creating a VPN tunnel on the source cluster and then connecting to a VPN server running on the destination cluster. The VPN server is exposed to the client using a load balancer address on the destination cluster.
+
+A service created on the destination cluster exposes the source cluster's API to {mtc-short}, which is running on the destination cluster.
+
+.Prerequisites
+
+* The system that creates the VPN tunnel must have access to both clusters and be logged in to them.
+* You must be able to create a load balancer on the destination cluster. Check with your cloud provider to confirm that this is possible.
+* Prepare names for the namespaces, on both the source cluster and the destination cluster, in which to run the VPN tunnel. Do not create these namespaces in advance. For information about namespace rules, see \https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names.
+* If you connect multiple firewall-protected source clusters to the cloud-based cluster, each source cluster requires its own namespace.
+* The OpenVPN server is installed on the destination cluster.
+* The OpenVPN client is installed on the source cluster.
+* When configuring the source cluster in {mtc-short}, the API URL takes the form of `\https://proxied-cluster.<namespace>.svc.cluster.local:8443`.
+** If you use the API, see _Create a MigCluster CR manifest for each remote cluster_.
+** If you use the {mtc-short} web console, see _Migrating your applications by using the {mtc-short} web console_.
+* The {mtc-short} web console and the Migration Controller must be installed on the destination cluster.
+
+.Procedure
+
+. Install the `crane` utility:
++
+[source,terminal,subs="+quotes"]
+----
+$ podman cp $(podman create registry.redhat.io/rhmtc/openshift-migration-controller-rhel8:v1.7.0):/crane ./
+----
+. Log in remotely to a node on the source cluster and a node on the destination cluster.
+
+. After logging in, obtain the cluster context for both clusters:
++
+[source,terminal,subs="+quotes"]
+----
+$ oc config view
+----
+
+. Establish a tunnel by entering the following command on the system that has access to both clusters:
++
+[source,terminal,subs="+quotes"]
+----
+$ crane tunnel-api [--namespace <namespace>] \
+      --destination-context <destination-cluster> \
+      --source-context <source-cluster>
+----
++
+If you do not specify a namespace, the command uses the default value `openvpn`.
++
+For example:
++
+[source,terminal,subs="+quotes"]
+----
+$ crane tunnel-api --namespace my_tunnel \
+      --destination-context openshift-migration/c131-e-us-east-containers-cloud-ibm-com/admin \
+      --source-context default/192-168-122-171-nip-io:8443/admin
+----
++
+[TIP]
+====
+To see all available parameters for the `crane tunnel-api` command, enter `crane tunnel-api --help`.
+====
++
+The command generates TLS/SSL certificates. This process might take several minutes. A message appears when the process completes.
++
+The OpenVPN server starts on the destination cluster and the OpenVPN client starts on the source cluster.
++
+After a few minutes, the address of the load balancer resolves on the source node.
++
+[TIP]
+====
+To check the status of this process, you can view the logs of the OpenVPN pods by entering the following commands with root privileges:
+
+[source,terminal,subs="+quotes"]
+----
+# oc get po -n <namespace>
+----
+
+.Example output
+[source,terminal]
+----
+NAME                 READY   STATUS    RESTARTS   AGE
+<pod_name>           2/2     Running   0          44s
+----
+
+[source,terminal,subs="+quotes"]
+----
+# oc logs -f -n <namespace> <pod_name> -c openvpn
+----
+When the address of the load balancer is resolved, the message `Initialization Sequence Completed` appears at the end of the log.
+====
+
+. On the destination control node, where the OpenVPN server runs, verify that the `openvpn` service and the `proxied-cluster` service are running:
++
+[source,terminal,subs="+quotes"]
+----
+$ oc get service -n <namespace>
+----
+
+. On the source node, get the service account (SA) token for the migration controller:
++
+[source,terminal]
+----
+# oc sa get-token -n openshift-migration migration-controller
+----
+
+. Open the {mtc-short} web console and add the source cluster, using the following values:
++
+* *Cluster name*: The source cluster name.
+* *URL*: `\https://proxied-cluster.<namespace>.svc.cluster.local:8443`. If you did not define a value for `<namespace>`, use `openvpn`.
+* *Service account token*: The token of the migration controller service account.
+* *Exposed route host to image registry*: `proxied-cluster.<namespace>.svc.cluster.local:5000`. If you did not define a value for `<namespace>`, use `openvpn`.
+
+After {mtc-short} has successfully validated the connection, you can proceed to create and run a migration plan. The namespace for the source cluster should appear in the list of namespaces.
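+
+If you add the source cluster by using the {mtc-short} API instead of the web console, the same values map to a `Secret` and a `MigCluster` manifest on the destination cluster. The following sketch is illustrative only: the resource names, the default `openvpn` namespace, and the `insecure` setting are assumptions, and the authoritative manifests are described in _Migrating an application by using the {mtc-short} API_.
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  name: proxied-cluster-sa-token        # hypothetical Secret name
+  namespace: openshift-config
+data:
+  saToken: <base64_encoded_sa_token>    # migration-controller SA token, base64-encoded
+---
+apiVersion: migration.openshift.io/v1alpha1
+kind: MigCluster
+metadata:
+  name: proxied-source-cluster          # hypothetical MigCluster name
+  namespace: openshift-migration
+spec:
+  isHostCluster: false
+  insecure: true                        # assumption: trust the tunnel's generated certificates without a CA bundle
+  url: https://proxied-cluster.openvpn.svc.cluster.local:8443          # uses the default namespace, openvpn
+  exposedRegistryPath: proxied-cluster.openvpn.svc.cluster.local:5000  # for direct image migration
+  serviceAccountSecretRef:
+    name: proxied-cluster-sa-token
+    namespace: openshift-config
+----
+
+You would apply these manifests on the destination cluster, for example with `oc apply -f`, after the tunnel is established.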