diff --git a/ci/vale/dictionary.txt b/ci/vale/dictionary.txt index c1b9a0b15bb..bc6ded96b76 100644 --- a/ci/vale/dictionary.txt +++ b/ci/vale/dictionary.txt @@ -2569,6 +2569,7 @@ tooltips torvalds TOTPs towson +tpslimit trac tracebacks traceroute diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-WebUI-Login.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-WebUI-Login.jpg new file mode 100644 index 00000000000..e8b1b842e2c Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-WebUI-Login.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-global-stats.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-global-stats.jpg new file mode 100644 index 00000000000..2b5b0de6308 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-global-stats.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-speed.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-speed.jpg new file mode 100644 index 00000000000..4f5f8d75b07 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/Rclone-speed.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/S3-to-OBJ-Arch1.png b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/S3-to-OBJ-Arch1.png new file mode 100644 index 00000000000..079d8ddcf21 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/S3-to-OBJ-Arch1.png differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/S3-to-OBJ-Arch2.png b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/S3-to-OBJ-Arch2.png new file mode 100644 index 00000000000..354bb34097a Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/S3-to-OBJ-Arch2.png differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/index.md b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/index.md new file mode 100644 index 00000000000..72eeabd5e4d --- /dev/null +++ b/docs/guides/platform/migrate-to-linode/migrate-from-aws-s3-to-linode-object-storage/index.md @@ -0,0 +1,309 @@ +--- +slug: migrate-from-aws-s3-to-linode-object-storage +title: "How to Migrate From AWS S3 to Linode Object Storage" +description: "This guide includes steps for how to migrate content from AWS S3 to Linode Object Storage using rclone." 
+authors: ["John Dutton"] +contributors: ["John Dutton"] +published: 2024-09-23 +keywords: ['migrate','migration','object storage','aws','s3','rclone'] +license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)' +aliases: ['/guides/migrate-to-linode-object-storage/'] +external_resources: +- '[Linode Object Storage product documentation](https://techdocs.akamai.com/cloud-computing/docs/object-storage)' +- '[Linode Object Storage guides & tutorials](/docs/guides/platform/object-storage/)' +--- + +Linode Object Storage is an S3-compatible service used for storing large amounts of unstructured data. This guide includes steps on how to migrate up to 100TB of static content from AWS S3 to Linode Object Storage using rclone, along with how to monitor your migration using rclone’s WebUI GUI. + +## Migration Considerations + +- **Migration time:** Migration time varies depending on various factors, including: the size and type of data being copied, the number of overall objects, network conditions, and the hardware limits of the infrastructure involved (i.e. bandwidth and throughput limits, network interfaces, CPU cores, RAM, etc.). + +- **Egress:** Egress is the measurement of outbound data being transferred and often results in a cost to the user. Egress costs may vary depending on provider rates and the amount of data being transferred. See your provider for specific egress rates. + +- **Bucket architecture:** The example in this guide shows steps for migrating content from a single object storage bucket. + + Note that [AWS scales by prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/optimizing-performance.html) within a single bucket, whereas Linode Object Storage scales per bucket with each bucket having its own rate limit. As a result, AWS content may need to be moved by prefix to individual Object Storage buckets to meet your specific performance requirements. See: [Object Storage: Optimize Applications to Avoid Rate Limiting](https://techdocs.akamai.com/cloud-computing/docs/object-storage#optimize-applications-to-avoid-rate-limiting) + + Migrating content and changing over workloads intermittently is recommended to ensure maximum uptime and reliability. + +- **Multiple machines running in parallel:** This guide provides steps for initiating and monitoring a single rclone copy job for a single object storage bucket. One option for speeding up a distributed workload migration is to run multiple rclone instances to migrate multiple buckets at the same time. + + See Linode Object Storage’s [technical specifications](https://techdocs.akamai.com/cloud-computing/docs/object-storage#technical-specifications-and-considerations) for rate and bandwidth limits if you are running multiple virtual machines in parallel. + +- **Large amounts of data:** This guide assumes you are migrating less than 100 million objects and 100TB of static data. If you require a larger amount of data transferred, contact our [sales](https://www.akamai.com/why-akamai/contact-us/contact-sales) and [professional services](https://www.akamai.com/site/en/documents/akamai/akamai-professional-services-and-support.pdf) teams. + +## Migration Architecture Diagram + +There are two architecture options for completing a data migration from AWS S3 to Linode Object Storage. One of these architectures is required to be in place prior to initiating the data migration: + +**Architecture 1:** Utilizes an EC2 instance running rclone in the same region as the source S3 bucket. 
Data is transferred internally from the S3 bucket to the EC2 instance and then over the public internet from the EC2 instance to the target Linode Object Storage bucket. + +- **Recommended for:** speed of transfer, users with AWS platform familiarity + +**Architecture 2:** Utilizes a Linode instance running rclone in the same region as the target Object Storage bucket. Data is transferred over the public internet from the AWS S3 bucket to the Linode instance and then internally via IPv6 to the Linode Object Storage bucket. + +- **Recommended for:** ease of implementation, users with Akamai platform familiarity + +{{< note title="Rclone performance" >}} +Rclone generally performs better when placed closer to the source data being copied. During testing for both architectures, Architecture 1 achieved about 20% higher transfer speed than Architecture 2. +{{< /note >}} + +### Architecture 1 + +1. A source AWS S3 bucket with the content to be transferred. + +1. An AWS EC2 instance running rclone in the same region as the source S3 bucket. The S3 bucket communicates with the EC2 instance via VPC Endpoint within the AWS region. Your IAM policy should allow S3 access only via your VPC Endpoint. + +1. Data is copied across the public internet from the AWS EC2 instance to a target Linode Object Storage bucket. This results in egress (outbound traffic) being calculated by AWS. + +1. The target Linode Object Storage bucket receives data from the EC2 instance. The migration status can be monitored using rclone’s WebUI. + +![S3-to-OBJ-Arch1](S3-to-OBJ-Arch1.png) + +### Architecture 2 + +1. A source AWS S3 bucket with the content to be transferred. + +1. A Compute Instance running rclone in the same Akamai core compute region as the target Linode Object Storage bucket. + +1. Data is copied across the public internet from the AWS S3 bucket to the target Linode instance. This results in egress being calculated by AWS. + +1. The target Linode Object Storage bucket receives the data via IPv6 from the Compute Instance on the region’s private network. Inbound, private IPv6 data to Linode Object Storage is free of charge. The migration status can be monitored using rclone’s WebUI. + +![S3-to-OBJ-Arch2](S3-to-OBJ-Arch2.png) + +## Prerequisites and Required Information + +- **A virtual machine with rclone installed**. This guide recommends a 16GB dedicated virtual machine with 8 CPU cores. The plan you require may vary depending on your workload. + +- The **public IPv4 address** of your virtual machine. + +- As a security best practice, **use a firewall to only allow inbound port 5572**. This is the default port used by rclone and enables more secure access to the WebUI since it is served over HTTP. For an additional layer of security, consider setting up an HTTPS gateway. + +- **An up-to-date web browser**. This is used to access the rclone WebUI while monitoring the migration. + +- **SSH access to the virtual machine** with sudo user privileges. + +- An existing **AWS S3 bucket** with: + + - IAM policy allowing S3 access only to VPC Endpoint + - Bucket name + - Access key + - Secret key + - Region ID + +- If using Architecture 1, there must be a [VPC gateway endpoint created](https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html) for S3 in the same VPC where your EC2 instance is deployed. This should be the same region as your S3 bucket. 
+ +- An **existing Linode Object Storage bucket** with: + + - Bucket name + - Access key + - Secret key + - Region ID and endpoint URL + + {{< note title="Object Storage Access Keys" >}} + When creating Object Storage access keys, it is a best practice to limit individual bucket access by region along with read/write permissions. See: [Manage access keys](https://techdocs.akamai.com/cloud-computing/docs/manage-access-keys) + {{< /note >}} + +## Migration Steps + +### Initiating the Data Migration + +1. On the instance running rclone, configure rclone to communicate with your source AWS S3 bucket and your target Linode Object Storage bucket. + + To view the location of the rclone config file, run: + + ```command + rclone config file + ``` + + If the file does not exist yet, you should see output similar to the following: + + ```output + Configuration file doesn't exist, but rclone will use this path: + /home/user/.config/rclone/rclone.conf + ``` + +1. Using the text editor of your choice, add the following configuration to your config file. Replace the following fields with your own corresponding provider and bucket values. Save your changes when complete: + + **AWS S3** + - {{< placeholder "AWS-ACCESS-KEY" >}}: Your AWS access key + - {{< placeholder "AWS-SECRET-KEY" >}}: Your AWS secret key + - {{< placeholder "us-east-1" >}}: The AWS region ID for your S3 bucket + + **Linode Object Storage** + - {{< placeholder "LINODE-ACCESS-KEY" >}}: Your Linode Object Storage access key + - {{< placeholder "LINODE-SECRET-KEY" >}}: Your Linode Object Storage secret key + - {{< placeholder "us-lax-1" >}}: The region ID for your Linode Object Storage bucket + + ```file + [aws] + type = s3 + provider = AWS + access_key_id = {{< placeholder "AWS-ACCESS-KEY" >}} + secret_access_key = {{< placeholder "AWS-SECRET-KEY" >}} + region = {{< placeholder "us-east-1" >}} + + [linode] + type = s3 + provider = Ceph + access_key_id = {{< placeholder "LINODE-ACCESS-KEY" >}} + secret_access_key = {{< placeholder "LINODE-SECRET-KEY" >}} + endpoint = {{< placeholder "us-lax-1" >}}.linodeobjects.com + acl = private + ``` + + {{< note title="Rclone Providers" >}} + The lines `[aws]` and `[linode]` define the remote providers for your source and target endpoints, respectively. See [Supported Providers](https://rclone.org/#providers) for a complete list of supported rclone providers. + {{< /note >}} + +1. Confirm connectivity to AWS S3 using your defined remote provider, `aws`: + + ```command + rclone lsd aws: + ``` + + If successful, you should see a list of available buckets: + + ```output + -1 2024-08-30 09:10:47 -1 aws-bucket-name + ``` + +1. Confirm connectivity to Linode Object Storage using the other defined remote provider, `linode`: + + ```command + rclone lsd linode: + ``` + + Similar to above, you should see a list of available buckets: + + ```output + -1 2024-08-28 14:46:47 -1 linode-bucket-name + ``` + +1. Run the rclone copy command to initiate the migration. + + Replace {{< placeholder "aws-bucket-name" >}} and {{< placeholder "linode-bucket-name" >}} with the names of your AWS S3 and Linode Object Storage buckets, respectively. Replace {{< placeholder "USERNAME" >}} and {{< placeholder "PASSWORD" >}} with the username and password you want to use to access the rclone WebUI. 
+ + If using Architecture 2, also include the `--bind ::0` flag to write data from your Compute Instance to your Object Storage bucket using IPv6: + + ```command + rclone copy aws:{{< placeholder "aws-bucket-name" >}}/ linode:{{< placeholder "linode-bucket-name" >}}/ --transfers 50 --rc --rc-addr=0.0.0.0:5572 --log-file=rclone.log --log-level=ERROR --rc-web-gui --rc-user {{< placeholder "USERNAME" >}} --rc-pass {{< placeholder "PASSWORD" >}} + ``` + +#### Rclone Copy Command Breakdown + +- `aws:aws-bucket-name/`: The AWS remote provider and source S3 bucket. Including the slash at the end informs the `copy` command to include everything within the bucket. + +- `linode:linode-bucket-name/`: The Linode remote provider and target Object Storage bucket. + +- `--transfers 50`: The `transfers` flag tells rclone how many items to transfer in parallel. Defaults to a value of 4. `50` here speeds up the transfer process by moving up to 50 items in parallel at a given time. + + Your `transfers` value may be different depending on how many objects you are transferring, and you may need to experiment to find the value that works best for your use case. High enough values may result in bandwidth limits being reached. Increasing this value also increases the CPU usage used by rclone. + +- `--rc`: Stands for “remote control”. The `rc` option deploys the http listen server for remote requests. + +- `--rc-addr=0.0.0.0:5572`: Specifies the web address and port number used to access the WebUI GUI. `0.0.0.0` instructs the remote to listen on all IPv4 addresses, and `5572` is the default port number used by rclone to access the WebUI. + +- `--log-file=rclone.log`: The file where rclone writes logs. This file is created in the working directory from where the `copy` command is run. + +- `--log-level=ERROR`: The type of logs to be written to your log file. `ERROR` here specifies only errors are written to the `rclone.log` file. + +- `--rc-web-gui`: Serves the WebUI GUI on the default rclone port (5572). + +- `--rc-user {{< placeholder "USERNAME" >}} and --rc-pass {{< placeholder "PASSWORD" >}}`: The username and password used to access the WebUI GUI. + +{{< note title="Using the htpasswd flag" >}} +An alternative to the `--rc-user` and `--rc-pass` combination is the `--rc-htpasswd` flag. This creates a `htpasswd` file containing a generated username and password combination you can use to log into the rclone WebUI. See [Remote controlling rclone with its API](https://rclone.org/rc/#rc-htpasswd-path) +{{< /note >}} + +- `--bind ::0` (for use with Architecture 2): Tells rclone to write data via IPv6. Note that writing data over IPv6 from a Linode instance to an Object Storage bucket in the same region is free of charge. + +#### Optional Flags + +- `--tpslimit {{< placeholder "XXX" >}}` : Specifies the number of HTTP transactions per second. For larger transfers, it is considered a best practice to set the `tpslimit` below the infrastructure requests per second (rps) limit. Should an rps limit be reached, a 503 `SlowDown` error may result. + + **Example:** If the infrastructure’s requests per second limit is 750 rps, set the tpslimit to 725: + + ```command + --tpslimit {{< placeholder "725" >}} + ``` + +### Monitoring the Migration + +To monitor the status of the `rclone copy` command above, you can access the rclone WebUI GUI from a web browser. + +1. In a web browser window, navigate to your instance’s address over port 5572. 
Replace {{< placeholder "IP-ADDRESS" >}} with the IPv4 address of your instance: + + ```command + http://{{< placeholder "IP-ADDRESS" >}}:5572 + ``` + +1. When prompted, enter the username and password you specified with the `--rc-user` and `--rc-pass` flags: + + ![Rclone-WebUI-Login](Rclone-WebUI-Login.jpg) + +1. Once logged in, you should see active running jobs along with multiple monitoring statistics, including: job status, throughput and speed, bandwidth max speed, total objects transferred, amount of data transferred, and more. + + ![Rclone-global-stats](Rclone-global-stats.jpg) + + ![Rclone-speed](Rclone-speed.jpg) + +1. The WebUI will disconnect automatically when the copy job is complete. + +## Verify the Migration + +You can compare the number of objects in both your source and target buckets along with the total size of the buckets to verify full completion of the copy job. + +### From the Command Line + +**AWS S3:** + +```command +rclone size aws:aws-bucket-name/ +``` + +```output +Total objects: 87.275k (87275) +Total size: 647.612 GiB (695368455398 Byte) +``` + +**Linode Object Storage:** + +```command +rclone size linode:linode-bucket-name/ +``` + +```output +Total objects: 87.275k (87275) +Total size: 647.612 GiB (695368455398 Byte) +``` + +### From a Browser + +Alternatively, you can compare the number of objects and total bucket sizes from the AWS Management Console and Cloud Manager on Akamai: + +**AWS Management Console:** + +- Navigate to **Amazon S3** +- Select the source bucket name +- Click the **Metrics** tab +- Under **Bucket metrics**, see **Total bucket size** and **Total number of objects** + + +**Cloud Manager:** + +- Navigate to **Object Storage** +- Find your target bucket name +- See the **Size** column for the total bucket size and the **Objects** column for the total number of objects + +## Next Steps + +There are several next steps to consider after a successful object storage migration: + +- **Change over your object storage endpoints to your new target bucket.** For example, if you have backups or logs being sent to your old source bucket, edit your jobs to point to the new bucket endpoints. + +- **Edit your configurations to match your new object storage credentials.** In addition to changing the target endpoints for your workloads, your access keys and secret keys need to be updated to your new Linode Object Storage keys. + +- **Confirm the changeover is functioning as expected.** Allow some time to make sure your updated workloads and jobs are interacting successfully with Linode Object Storage. Once you confirm everything is working as expected, you can safely delete the original source bucket and its contents. 
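+
+As an optional final check before deleting the source bucket and its contents, you can compare the two buckets object by object with rclone's `check` command. The following is a minimal sketch, run from the same instance and reusing the `aws` and `linode` remotes defined earlier; the `--one-way` flag only verifies that every object in the source bucket is also present in the destination:
+
+```command
+rclone check aws:{{< placeholder "aws-bucket-name" >}}/ linode:{{< placeholder "linode-bucket-name" >}}/ --one-way
+```
+
+If you prefer to skip hash comparison, adding the `--size-only` flag limits the check to object sizes.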
\ No newline at end of file diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Azure-to-OBJ-Arch1.png b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Azure-to-OBJ-Arch1.png new file mode 100644 index 00000000000..c8414714213 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Azure-to-OBJ-Arch1.png differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Azure-to-OBJ-Arch2.png b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Azure-to-OBJ-Arch2.png new file mode 100644 index 00000000000..58da1bdc0ed Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Azure-to-OBJ-Arch2.png differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-WebUI-Login.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-WebUI-Login.jpg new file mode 100644 index 00000000000..e8b1b842e2c Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-WebUI-Login.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-global-stats.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-global-stats.jpg new file mode 100644 index 00000000000..2b5b0de6308 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-global-stats.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-speed.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-speed.jpg new file mode 100644 index 00000000000..4f5f8d75b07 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/Rclone-speed.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/index.md b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/index.md new file mode 100644 index 00000000000..7b309a636e1 --- /dev/null +++ b/docs/guides/platform/migrate-to-linode/migrate-from-azure-blob-storage-to-linode-object-storage/index.md @@ -0,0 +1,308 @@ +--- +slug: migrate-from-azure-blob-storage-to-linode-object-storage +title: "How to Migrate From Azure Blob Storage to Linode Object Storage" +description: "This guide includes steps for how to migrate content from Azure Blob Storage to Linode Object Storage using rclone." 
+authors: ["John Dutton"] +contributors: ["John Dutton"] +published: 2024-09-23 +keywords: ['migrate','migration','object storage','azure blob storage','rclone'] +license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)' +external_resources: +- '[Linode Object Storage product documentation](https://techdocs.akamai.com/cloud-computing/docs/object-storage)' +- '[Linode Object Storage guides & tutorials](/docs/guides/platform/object-storage/)' +--- + +Linode Object Storage is an S3-compatible service used for storing large amounts of unstructured data. This guide includes steps on how to migrate up to 100TB of static content from Azure Blob Storage to Linode Object Storage using rclone, along with how to monitor your migration using rclone’s WebUI GUI. + +## Migration Considerations + +- **Migration time:** Migration time varies depending on various factors, including: the size and type of data being copied, the number of overall objects, network conditions, and the hardware limits of the infrastructure involved (i.e. bandwidth and throughput limits, network interfaces, CPU cores, RAM, etc.). + +- **Egress:** Egress is the measurement of outbound data being transferred and often results in a cost to the user. Egress costs may vary depending on provider rates and the amount of data being transferred. See your provider for specific egress rates. + +- **Bucket architecture:** The example in this guide shows steps for migrating content from a single Azure storage container to a single Linode Object Storage bucket. + + Azure blobs live within containers with throughput rates scaled by blob, whereas Linode Object Storage scales per bucket with each bucket having its own rate limit. How you map from containers or blobs to Object Storage buckets will vary depending on your use case and performance needs. See: [Scalability and performance targets for Blob storage](https://learn.microsoft.com/en-us/azure/storage/blobs/scalability-targets) + + Migrating content and changing over workloads intermittently is recommended to ensure maximum uptime and reliability. + +- **Multiple machines running in parallel:** This guide provides steps for initiating and monitoring a single rclone copy job for a single object storage bucket. One option for speeding up a distributed workload migration is to run multiple rclone instances to migrate multiple buckets at the same time. + + See Linode Object Storage’s [technical specifications](https://techdocs.akamai.com/cloud-computing/docs/object-storage#technical-specifications-and-considerations) for rate and bandwidth limits if you are running multiple virtual machines in parallel. + +- **Large amounts of data:** This guide assumes you are migrating less than 100 million objects and 100TB of static data. If you require a larger amount of data transferred, contact our [sales](https://www.akamai.com/why-akamai/contact-us/contact-sales) and [professional services](https://www.akamai.com/site/en/documents/akamai/akamai-professional-services-and-support.pdf) teams. + +## Migration Architecture Diagram + +There are two architecture options for completing a data migration from Azure Blob Storage to Linode Object Storage. One of these architectures is required to be in place prior to initiating the data migration: + +**Architecture 1:** Utilizes an Azure Virtual Machine running rclone in the same region as the source Blob Storage container. 
Data is transferred internally from the Blob Storage container to the Virtual Machine and then over the public internet from the Virtual Machine to the target Linode Object Storage bucket. + +- **Recommended for:** speed of transfer, users with Azure platform familiarity + +**Architecture 2:** Utilizes a Linode instance running rclone in the same region as the target Object Storage bucket. Data is transferred over the public internet from the Blob Storage container to the Linode instance and then internally via IPv6 to the Linode Object Storage bucket. + +- **Recommended for:** ease of implementation, users with Akamai platform familiarity + +{{< note title="Rclone performance" >}} +Rclone generally performs better when placed closer to the source data being copied. During testing for both architectures, Architecture 1 achieved about 20% higher transfer speed than Architecture 2. +{{< /note >}} + +### Architecture 1 + +1. A source Azure Blob Storage container with the content to be transferred. + +1. An Azure Virtual Machine running rclone in the same region as the source container. The Azure Blob Storage container communicates with the Virtual Machine through a Service Endpoint within the Azure region inside a Virtual Network. + +1. Data is copied across the public internet from the Azure Virtual Machine to a target Linode Object Storage bucket. This results in egress (outbound traffic) being calculated by Azure. + +1. The target Linode Object Storage bucket receives data from the source Azure Virtual Machine. The migration status can be monitored using rclone’s WebUI. + +![Azure-to-OBJ-Arch1](Azure-to-OBJ-Arch1.png) + +### Architecture 2 + +1. A source Azure Blob Storage container with the content to be transferred. + +1. A Compute Instance running rclone in the same Akamai core compute region as the target Linode Object Storage bucket. + +1. Data is copied across the public internet from the Azure Blob Storage container to the target Linode instance. This results in egress being calculated by Azure. + +1. The target Linode Object Storage bucket receives the data via IPv6 from the Compute Instance on the region’s private network. Inbound, private IPv6 data to Linode Object Storage is free of charge. The migration status can be monitored using rclone’s WebUI. + +![Azure-to-OBJ-Arch2](Azure-to-OBJ-Arch2.png) + +## Prerequisites and Required Information + +- **A virtual machine with rclone installed**. This guide recommends a 16GB dedicated virtual machine with 8 CPU cores. The plan you require may vary depending on your workload. + +- The **public IPv4 address** of your virtual machine. + +- As a security best practice, **use a firewall to only allow inbound port 5572**. This is the default port used by rclone and enables more secure access to the WebUI since it is served over HTTP. For an additional layer of security, consider setting up an HTTPS gateway. + +- **An up-to-date web browser**. This is used to access the rclone WebUI while monitoring the migration. + +- **SSH access to the virtual machine** with sudo user privileges. + +- An **Azure storage account**. + +- An existing **Azure Blob Storage container** with: + + - Storage account name + - Azure key + - Container name + +- If using Architecture 1, an **Azure Virtual Network** enabled to communicate with the Azure Blob Storage container and Virtual Machine through a [Service Endpoint](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-service-endpoints-overview) is required. 
+ +- An **existing Linode Object Storage bucket** with: + + - Bucket name + - Access key + - Secret key + - Region ID and endpoint URL + + {{< note title="Object Storage Access Keys" >}} + When creating Object Storage access keys, it is a best practice to limit individual bucket access by region along with read/write permissions. See: [Manage access keys](https://techdocs.akamai.com/cloud-computing/docs/manage-access-keys) + {{< /note >}} + +## Migration Steps + +### Initiating the Data Migration + +1. On the instance running rclone, configure rclone to communicate with your source Azure Blob Storage container and your target Linode Object Storage bucket. + + To view the location of the rclone config file, run: + + ```command + rclone config file + ``` + + If the file does not exist yet, you should see output similar to the following: + + ```output + Configuration file doesn't exist, but rclone will use this path: + /home/user/.config/rclone/rclone.conf + ``` + +1. Using the text editor of your choice, add the following configuration to your config file. Replace the following fields with your own corresponding provider and bucket values. Save your changes when complete: + + **Azure Blob Storage** + - {{< placeholder "STORAGE-ACCOUNT-NAME" >}}: The name of the Azure storage account + - {{< placeholder "AZURE-KEY" >}}: Your Azure key + + **Linode Object Storage** + - {{< placeholder "LINODE-ACCESS-KEY" >}}: Your Linode Object Storage access key + - {{< placeholder "LINODE-SECRET-KEY" >}}: Your Linode Object Storage secret key + - {{< placeholder "us-lax-1" >}}: The region ID for your Linode Object Storage bucket + + ```file + [azure] + type = azureblob + account = {{< placeholder "STORAGE-ACCOUNT-NAME" >}} + key = {{< placeholder "AZURE-KEY" >}} + + [linode] + type = s3 + provider = Ceph + access_key_id = {{< placeholder "LINODE-ACCESS-KEY" >}} + secret_access_key = {{< placeholder "LINODE-SECRET-KEY" >}} + endpoint = {{< placeholder "us-lax-1" >}}.linodeobjects.com + acl = private + ``` + + {{< note title="Rclone Providers" >}} + The lines `[azure]` and `[linode]` define the remote providers for your source and target endpoints, respectively. See [Supported Providers](https://rclone.org/#providers) for a complete list of supported rclone providers. + {{< /note >}} + +1. Confirm connectivity to the Azure storage account using your defined remote provider, `azure`: + + ```command + rclone lsd azure: + ``` + + If successful, you should see a list of available buckets: + + ```output + -1 2024-08-30 09:10:47 -1 azure-container-name + ``` + +1. Confirm connectivity to Linode Object Storage using the other defined remote provider, `linode`: + + ```command + rclone lsd linode: + ``` + + Similar to above, you should see a list of available buckets: + + ```output + -1 2024-08-28 14:46:47 -1 linode-bucket-name + ``` + +1. Run the rclone copy command to initiate the migration. + + Replace {{< placeholder "azure-container-name" >}} and {{< placeholder "linode-bucket-name" >}} with the names of your Azure and Linode Object Storage buckets, respectively. Replace {{< placeholder "USERNAME" >}} and {{< placeholder "PASSWORD" >}} with the username and password you want to use to access the rclone WebUI. 
+ + If using Architecture 2, also include the `--bind ::0` flag to write data from your Compute Instance to your Object Storage bucket using IPv6: + + ```command + rclone copy azure:{{< placeholder "azure-container-name" >}}/ linode:{{< placeholder "linode-bucket-name" >}}/ --transfers 50 --rc --rc-addr=0.0.0.0:5572 --log-file=rclone.log --log-level=ERROR --rc-web-gui --rc-user {{< placeholder "USERNAME" >}} --rc-pass {{< placeholder "PASSWORD" >}} + ``` + +#### Rclone Copy Command Breakdown + +- `azure:azure-container-name/`: The Azure remote provider and source Blob Storage container. Including the slash at the end informs the `copy` command to include everything within the bucket. + +- `linode:linode-bucket-name/`: The Linode remote provider and target Object Storage bucket. + +- `--transfers 50`: The `transfers` flag tells rclone how many items to transfer in parallel. Defaults to a value of 4. `50` here speeds up the transfer process by moving up to 50 items in parallel at a given time. + + Your `transfers` value may be different depending on how many objects you are transferring, and you may need to experiment to find the value that works best for your use case. High enough values may result in bandwidth limits being reached. Increasing this value also increases the CPU usage used by rclone. + +- `--rc`: Stands for “remote control”. The `rc` option deploys the http listen server for remote requests. + +- `--rc-addr=0.0.0.0:5572`: Specifies the web address and port number used to access the WebUI GUI. `0.0.0.0` instructs the remote to listen on all IPv4 addresses, and `5572` is the default port number used by rclone to access the WebUI. + +- `--log-file=rclone.log`: The file where rclone writes logs. This file is created in the working directory from where the `copy` command is run. + +- `--log-level=ERROR`: The type of logs to be written to your log file. `ERROR` here specifies only errors are written to the `rclone.log` file. + +- `--rc-web-gui`: Serves the WebUI GUI on the default rclone port (5572). + +- `--rc-user {{< placeholder "USERNAME" >}} and --rc-pass {{< placeholder "PASSWORD" >}}`: The username and password used to access the WebUI GUI. + +{{< note title="Using the htpasswd flag" >}} +An alternative to the `--rc-user` and `--rc-pass` combination is the `--rc-htpasswd` flag. This creates a `htpasswd` file containing a generated username and password combination you can use to log into the rclone WebUI. See [Remote controlling rclone with its API](https://rclone.org/rc/#rc-htpasswd-path) +{{< /note >}} + +- `--bind ::0` (for use with Architecture 2): Tells rclone to write data via IPv6. Note that writing data over IPv6 from a Linode instance to an Object Storage bucket in the same region is free of charge. + +#### Optional Flags + +- `--tpslimit {{< placeholder "XXX" >}}` : Specifies the number of HTTP transactions per second. For larger transfers, it is considered a best practice to set the `tpslimit` below the infrastructure requests per second (rps) limit. Should an rps limit be reached, a 503 `SlowDown` error may result. + + **Example:** If the infrastructure’s requests per second limit is 750 rps, set the tpslimit to 725: + + ```command + --tpslimit {{< placeholder "725" >}} + ``` + +### Monitoring the Migration + +To monitor the status of the `rclone copy` command above, you can access the rclone WebUI GUI from a web browser. + +1. In a web browser window, navigate to your instance’s address over port 5572. 
Replace {{< placeholder "IP-ADDRESS" >}} with the IPv4 address of your instance: + + ```command + http://{{< placeholder "IP-ADDRESS" >}}:5572 + ``` + +1. When prompted, enter the username and password you specified with the `--rc-user` and `--rc-pass` flags: + + ![Rclone-WebUI-Login](Rclone-WebUI-Login.jpg) + +1. Once logged in, you should see active running jobs along with multiple monitoring statistics, including: job status, throughput and speed, bandwidth max speed, total objects transferred, amount of data transferred, and more. + + ![Rclone-global-stats](Rclone-global-stats.jpg) + + ![Rclone-speed](Rclone-speed.jpg) + +1. The WebUI will disconnect automatically when the copy job is complete. + +## Verify the Migration + +You can compare the number of objects in both your source and target buckets along with the total size of the buckets to verify full completion of the copy job. + +### From the Command Line + +**Azure Blob Storage:** + +```command +rclone size azure:azure-container-name/ +``` + +```output +Total objects: 87.275k (87275) +Total size: 647.612 GiB (695368455398 Byte) +``` + +**Linode Object Storage:** + +```command +rclone size linode:linode-bucket-name/ +``` + +```output +Total objects: 87.275k (87275) +Total size: 647.612 GiB (695368455398 Byte) +``` + +### From a Browser + +Alternatively, you can compare the number of objects and total bucket sizes from the Azure Portal and Cloud Manager on Akamai: + +**Azure Portal:** + +- Navigate to your storage account +- Select the **Monitoring** dropdown +- Click the **Metrics** tab +- Under **Metric Namespace**, select **Blob** +- To see the container size, select **Blob Capacity** under **Metric** +- To see the total number of objects, select **Blob Count** under **Metric** + +**Cloud Manager:** + +- Navigate to **Object Storage** +- Find your target bucket name +- See the **Size** column for the total bucket size and the **Objects** column for the total number of objects + +## Next Steps + +There are several next steps to consider after a successful object storage migration: + +- **Change over your object storage endpoints to your new target bucket.** For example, if you have backups or logs being sent to your old source bucket, edit your jobs to point to the new bucket endpoints. + +- **Edit your configurations to match your new object storage credentials.** In addition to changing the target endpoints for your workloads, your access keys and secret keys need to be updated to your new Linode Object Storage keys. + +- **Confirm the changeover is functioning as expected.** Allow some time to make sure your updated workloads and jobs are interacting successfully with Linode Object Storage. Once you confirm everything is working as expected, you can safely delete the original source bucket and its contents. + +- **Take any additional steps to update your system for S3 compatibility.** Since the Azure Blob Storage API is not S3 compatible, you may need to make internal configuration changes to ensure your system is set up to communicate using S3 protocol. This means your system should be updated to use an S3-compatible [SDK](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingAWSSDK.html) like [Boto3](https://aws.amazon.com/sdk-for-python/) or S3-compatible command line utility like [s3cmd](https://s3tools.org/s3cmd). The [AWS SDK](https://techdocs.akamai.com/cloud-computing/docs/using-the-aws-sdk-for-php-with-object-storage) can also be configured to function with Linode Object Storage. 
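+
+As a minimal illustration of that last point, the sketch below shows an s3cmd configuration file (typically `~/.s3cfg`) pointed at Linode Object Storage. It assumes s3cmd is installed and reuses the access key, secret key, and region placeholders from earlier in this guide:
+
+```file
+[default]
+access_key = {{< placeholder "LINODE-ACCESS-KEY" >}}
+secret_key = {{< placeholder "LINODE-SECRET-KEY" >}}
+host_base = {{< placeholder "us-lax-1" >}}.linodeobjects.com
+host_bucket = %(bucket)s.{{< placeholder "us-lax-1" >}}.linodeobjects.com
+use_https = True
+```
+
+You can then confirm S3-compatible access by listing the contents of your target bucket:
+
+```command
+s3cmd ls s3://{{< placeholder "linode-bucket-name" >}}
+```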
\ No newline at end of file diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/GCS-to-OBJ-Arch1.png b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/GCS-to-OBJ-Arch1.png new file mode 100644 index 00000000000..f493554aebf Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/GCS-to-OBJ-Arch1.png differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/GCS-to-OBJ-Arch2.png b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/GCS-to-OBJ-Arch2.png new file mode 100644 index 00000000000..c70deb63b3a Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/GCS-to-OBJ-Arch2.png differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-WebUI-Login.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-WebUI-Login.jpg new file mode 100644 index 00000000000..e8b1b842e2c Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-WebUI-Login.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-global-stats.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-global-stats.jpg new file mode 100644 index 00000000000..2b5b0de6308 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-global-stats.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-speed.jpg b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-speed.jpg new file mode 100644 index 00000000000..4f5f8d75b07 Binary files /dev/null and b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/Rclone-speed.jpg differ diff --git a/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/index.md b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/index.md new file mode 100644 index 00000000000..1a92c36512a --- /dev/null +++ b/docs/guides/platform/migrate-to-linode/migrate-from-google-cloud-storage-to-linode-object-storage/index.md @@ -0,0 +1,313 @@ +--- +slug: migrate-from-google-cloud-storage-to-linode-object-storage +title: "How to Migrate From Google Cloud Storage to Linode Object Storage" +description: "This guide includes steps for how to migrate content from Google Cloud Storage to Linode Object Storage using rclone." 
+authors: ["John Dutton"] +contributors: ["John Dutton"] +published: 2024-09-23 +keywords: ['migrate','migration','object storage','google cloud storage','rclone'] +license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)' +external_resources: +- '[Linode Object Storage product documentation](https://techdocs.akamai.com/cloud-computing/docs/object-storage)' +- '[Linode Object Storage guides & tutorials](/docs/guides/platform/object-storage/)' +--- + +Linode Object Storage is an S3-compatible service used for storing large amounts of unstructured data. This guide includes steps on how to migrate up to 100TB of static content from Google Cloud Storage to Linode Object Storage using rclone, along with how to monitor your migration using rclone’s WebUI GUI. + +## Migration Considerations + +- **Migration time:** Migration time varies depending on various factors, including: the size and type of data being copied, the number of overall objects, network conditions, and the hardware limits of the infrastructure involved (i.e. bandwidth and throughput limits, network interfaces, CPU cores, RAM, etc.). + +- **Egress:** Egress is the measurement of outbound data being transferred and often results in a cost to the user. Egress costs may vary depending on provider rates and the amount of data being transferred. See your provider for specific egress rates. + +- **Bucket architecture:** The example in this guide shows steps for migrating content from a single Google Cloud Storage bucket with a standard storage class to a single bucket on Akamai. + + Migrating content and changing over workloads intermittently is recommended to ensure maximum uptime and reliability. + +- **Multiple machines running in parallel:** This guide provides steps for initiating and monitoring a single rclone copy job for a single object storage bucket. One option for speeding up a distributed workload migration is to run multiple rclone instances to migrate multiple buckets at the same time. + + See Linode Object Storage’s [technical specifications](https://techdocs.akamai.com/cloud-computing/docs/object-storage#technical-specifications-and-considerations) for rate and bandwidth limits if you are running multiple virtual machines in parallel. + +- **Large amounts of data:** This guide assumes you are migrating less than 100 million objects and 100TB of static data. If you require a larger amount of data transferred, contact our [sales](https://www.akamai.com/why-akamai/contact-us/contact-sales) and [professional services](https://www.akamai.com/site/en/documents/akamai/akamai-professional-services-and-support.pdf) teams. + +## Migration Architecture Diagram + +There are two architecture options for completing a data migration from Google Cloud Storage to Linode Object Storage. One of these architectures is required to be in place prior to initiating the data migration: + +**Architecture 1:** Utilizes a Google VM instance running rclone in the same region as the source Cloud Storage bucket. Data is transferred internally from the Cloud Storage bucket to the VM instance and then over the public internet from the VM instance to the target Linode Object Storage bucket. + +- **Recommended for:** speed of transfer, users with Google Cloud platform familiarity + +**Architecture 2:** Utilizes a Linode instance running rclone in the same region as the target Object Storage bucket. 
Data is transferred over the public internet from the Google Cloud Storage bucket to the Linode instance and then internally via IPv6 to the Linode Object Storage bucket. + +- **Recommended for:** ease of implementation, users with Akamai platform familiarity + +{{< note title="Rclone performance" >}} +Rclone generally performs better when placed closer to the source data being copied. During testing for both architectures, Architecture 1 consistently achieved a higher transfer speed than Architecture 2. +{{< /note >}} + +### Architecture 1 + +1. A source Google Cloud Storage bucket with the content to be transferred. + +1. A Google VM instance running rclone in the same region as the source Cloud Storage bucket. The Cloud Storage bucket communicates with the VM instance via VPC within the Google region. + + By default, Google Cloud offers a global VPC with pre-established subnets by region. Your VPC must be configured with [Private Google Access](https://cloud.google.com/vpc/docs/private-google-access) to establish a private connection between Google Cloud Storage and your VM instance + +1. Data is copied across the public internet from the Google VM instance to a target Linode Object Storage bucket. This results in egress (outbound traffic) being calculated by Google Cloud. + +1. The target Linode Object Storage bucket receives data from the Google VM instance. The migration status can be monitored using rclone’s WebUI. + +![GCS-to-OBJ-Arch1](GCS-to-OBJ-Arch1.png) + +### Architecture 2 + +1. A source Google Cloud Storage bucket with the content to be transferred. + +1. A Compute Instance running rclone in the same Akamai core compute region as the target Linode Object Storage bucket. + +1. Data is copied across the public internet from the Google Cloud Storage bucket to the target Linode instance. This results in egress being calculated by Google Cloud. + +1. The target Linode Object Storage bucket receives the data via IPv6 from the Compute Instance on the region’s private network. Inbound, private IPv6 data to Linode Object Storage is free of charge. The migration status can be monitored using rclone’s WebUI. + +![GCS-to-OBJ-Arch2](GCS-to-OBJ-Arch2.png) + +## Prerequisites and Required Information + +- **A virtual machine with rclone installed**. This guide recommends a 16GB dedicated virtual machine with 8 CPU cores. The plan you require may vary depending on your workload. + +- The **public IPv4 address** of your virtual machine. + +- As a security best practice, **use a firewall to only allow inbound port 5572**. This is the default port used by rclone and enables more secure access to the WebUI since it is served over HTTP. For an additional layer of security, consider setting up an HTTPS gateway. + +- **An up-to-date web browser**. This is used to access the rclone WebUI while monitoring the migration. + +- **SSH access to the virtual machine** with sudo user privileges. + +- A **Google Cloud [Service Account](https://cloud.google.com/iam/docs/service-account-overview)**, including: + + - A preconfigured [Role](https://cloud.google.com/iam/docs/roles-overview) with GET and LIST objects and bucket privileges. + - The Role must be [assigned and granted permission to your Service Account](https://cloud.google.com/iam/docs/manage-access-service-accounts). + - JSON key credentials for your Service Account. 
When [creating a key for your Service Account](https://cloud.google.com/iam/docs/keys-create-delete), select **JSON**, and a key file in JSON format will automatically download to your local machine. + +- An existing **Google Cloud Storage bucket** with: + + - Access control set to “Fine-grained” + - Bucket name + - Project number + - Region ID + +- If using Architecture 1, your Google VM instance needs to be deployed to the same region and VPC as the Cloud Storage bucket. [Private Google Access](https://cloud.google.com/vpc/docs/private-google-access) must be enabled at the subnet level within the VPC. + +- An **existing Linode Object Storage bucket** with: + + - Bucket name + - Access key + - Secret key + - Region ID and endpoint URL + + {{< note title="Object Storage Access Keys" >}} + When creating Object Storage access keys, it is a best practice to limit individual bucket access by region along with read/write permissions. See: [Manage access keys](https://techdocs.akamai.com/cloud-computing/docs/manage-access-keys) + {{< /note >}} + +## Migration Steps + +### Initiating the Data Migration + +1. On the instance running rclone, configure rclone to communicate with your source Google Cloud Storage bucket and your target Linode Object Storage bucket. + + To view the location of the rclone config file, run: + + ```command + rclone config file + ``` + + If the file does not exist yet, you should see output similar to the following: + + ```output + Configuration file doesn't exist, but rclone will use this path: + /home/user/.config/rclone/rclone.conf + ``` + +1. Using the text editor of your choice, add the following configuration to your config file. Replace the following fields with your own corresponding provider and bucket values. Save your changes when complete: + + **GCS** + - {{< placeholder "PROJECT-ID" >}}: The project number associated with your Service Account key. This can be found in your downloaded JSON key file and is labeled `project_id`. + - {{< placeholder "JSON-KEY-CREDENTIALS" >}}: The entire contents of the downloaded JSON key file, including the open and close brackets. **Important:** The JSON contents must be contained to a single line in the file, or an error will occur. + + **Linode Object Storage** + - {{< placeholder "LINODE-ACCESS-KEY" >}}: Your Linode Object Storage access key + - {{< placeholder "LINODE-SECRET-KEY" >}}: Your Linode Object Storage secret key + - {{< placeholder "us-lax-1" >}}: The region ID for your Linode Object Storage bucket + + ```file + [gcs] + type = google cloud storage + anonymous = false + project_number = {{< placeholder "PROJECT-ID" >}} + service_account_credentials = { {{< placeholder "JSON-KEY-CREDENTIALS" >}} } + + [linode] + type = s3 + provider = Ceph + access_key_id = {{< placeholder "LINODE-ACCESS-KEY" >}} + secret_access_key = {{< placeholder "LINODE-SECRET-KEY" >}} + endpoint = {{< placeholder "us-lax-1" >}}.linodeobjects.com + acl = private + ``` + + {{< note title="Rclone Providers" >}} + The lines `[gcs]` and `[linode]` define the remote providers for your source and target endpoints, respectively. See [Supported Providers](https://rclone.org/#providers) for a complete list of supported rclone providers. + {{< /note >}} + +1. Confirm connectivity to Google Cloud Storage using your defined remote provider, `gcs`: + + ```command + rclone lsd gcs: + ``` + + If successful, you should see a list of available buckets: + + ```output + -1 2024-08-30 09:10:47 -1 gcs-bucket-name + ``` + +1. 
Confirm connectivity to Linode Object Storage using the other defined remote provider, `linode`: + + ```command + rclone lsd linode: + ``` + + Similar to above, you should see a list of available buckets: + + ```output + -1 2024-08-28 14:46:47 -1 linode-bucket-name + ``` + +1. Run the rclone copy command to initiate the migration. + + Replace {{< placeholder "gcs-bucket-name" >}} and {{< placeholder "linode-bucket-name" >}} with the names of your Google Cloud Storage and Linode Object Storage buckets, respectively. Replace {{< placeholder "USERNAME" >}} and {{< placeholder "PASSWORD" >}} with the username and password you want to use to access the rclone WebUI. + + If using Architecture 2, also include the `--bind ::0` flag to write data from your Compute Instance to your Object Storage bucket using IPv6: + + ```command + rclone copy gcs:{{< placeholder "gcs-bucket-name" >}}/ linode:{{< placeholder "linode-bucket-name" >}}/ --transfers 50 --rc --rc-addr=0.0.0.0:5572 --log-file=rclone.log --log-level=ERROR --rc-web-gui --rc-user {{< placeholder "USERNAME" >}} --rc-pass {{< placeholder "PASSWORD" >}} + ``` + +#### Rclone Copy Command Breakdown + +- `gcs:gcs-bucket-name/`: The Google Cloud remote provider and source Cloud Storage bucket. Including the slash at the end informs the `copy` command to include everything within the bucket. + +- `linode:linode-bucket-name/`: The Linode remote provider and target Object Storage bucket. + +- `--transfers 50`: The `transfers` flag tells rclone how many items to transfer in parallel. Defaults to a value of 4. `50` here speeds up the transfer process by moving up to 50 items in parallel at a given time. + + Your `transfers` value may be different depending on how many objects you are transferring, and you may need to experiment to find the value that works best for your use case. High enough values may result in bandwidth limits being reached. Increasing this value also increases the CPU usage used by rclone. + +- `--rc`: Stands for “remote control”. The `rc` option deploys the http listen server for remote requests. + +- `--rc-addr=0.0.0.0:5572`: Specifies the web address and port number used to access the WebUI GUI. `0.0.0.0` instructs the remote to listen on all IPv4 addresses, and `5572` is the default port number used by rclone to access the WebUI. + +- `--log-file=rclone.log`: The file where rclone writes logs. This file is created in the working directory from where the `copy` command is run. + +- `--log-level=ERROR`: The type of logs to be written to your log file. `ERROR` here specifies only errors are written to the `rclone.log` file. + +- `--rc-web-gui`: Serves the WebUI GUI on the default rclone port (5572). + +- `--rc-user {{< placeholder "USERNAME" >}} and --rc-pass {{< placeholder "PASSWORD" >}}`: The username and password used to access the WebUI GUI. + +{{< note title="Using the htpasswd flag" >}} +An alternative to the `--rc-user` and `--rc-pass` combination is the `--rc-htpasswd` flag. This creates a `htpasswd` file containing a generated username and password combination you can use to log into the rclone WebUI. See [Remote controlling rclone with its API](https://rclone.org/rc/#rc-htpasswd-path) +{{< /note >}} + +- `--bind ::0` (for use with Architecture 2): Tells rclone to write data via IPv6. Note that writing data over IPv6 from a Linode instance to an Object Storage bucket in the same region is free of charge. 
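+
+For reference, the sketch below shows how the full `rclone copy` command shown above might look when following Architecture 2, with the `--bind ::0` flag appended so that data is written to the Object Storage bucket over IPv6. The bucket names, username, and password are placeholders:
+
+```command
+rclone copy gcs:{{< placeholder "gcs-bucket-name" >}}/ linode:{{< placeholder "linode-bucket-name" >}}/ --transfers 50 --rc --rc-addr=0.0.0.0:5572 --log-file=rclone.log --log-level=ERROR --rc-web-gui --rc-user {{< placeholder "USERNAME" >}} --rc-pass {{< placeholder "PASSWORD" >}} --bind ::0
+```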
+ +#### Optional Flags + +- `--tpslimit {{< placeholder "XXX" >}}` : Specifies the number of HTTP transactions per second. For larger transfers, it is considered a best practice to set the `tpslimit` below the infrastructure requests per second (rps) limit. Should an rps limit be reached, a 503 `SlowDown` error may result. + + **Example:** If the infrastructure’s requests per second limit is 750 rps, set the tpslimit to 725: + + ```command + --tpslimit {{< placeholder "725" >}} + ``` + +### Monitoring the Migration + +To monitor the status of the `rclone copy` command above, you can access the rclone WebUI GUI from a web browser. + +1. In a web browser window, navigate to your instance’s address over port 5572. Replace {{< placeholder "IP-ADDRESS" >}} with the IPv4 address of your instance: + + ```command + http://{{< placeholder "IP-ADDRESS" >}}:5572 + ``` + +1. When prompted, enter the username and password you specified with the `--rc-user` and `--rc-pass` flags: + + ![Rclone-WebUI-Login](Rclone-WebUI-Login.jpg) + +1. Once logged in, you should see active running jobs along with multiple monitoring statistics, including: job status, throughput and speed, bandwidth max speed, total objects transferred, amount of data transferred, and more. + + ![Rclone-global-stats](Rclone-global-stats.jpg) + + ![Rclone-speed](Rclone-speed.jpg) + +1. The WebUI will disconnect automatically when the copy job is complete. + +## Verify the Migration + +You can compare the number of objects in both your source and target buckets along with the total size of the buckets to verify full completion of the copy job. + +### From the Command Line + +**Google Cloud Storage:** + +```command +rclone size gcs:gcs-bucket-name/ +``` + +```output +Total objects: 87.275k (87275) +Total size: 647.612 GiB (695368455398 Byte) +``` + +**Linode Object Storage:** + +```command +rclone size linode:linode-bucket-name/ +``` + +```output +Total objects: 87.275k (87275) +Total size: 647.612 GiB (695368455398 Byte) +``` + +### From a Browser + +Alternatively, you can compare the number of objects and total bucket sizes from the Google Cloud Management Console and Cloud Manager on Akamai: + +**Google Cloud Management Console:** + +- Navigate to **Cloud Storage** +- Select the source bucket name +- Click the **Observability** tab +- Select a timeframe and see **Total storage by object state** +- To see the total number of objects, add the **Object Count** Storage widget to your Observability dashboard + +**Cloud Manager:** + +- Navigate to **Object Storage** +- Find your target bucket name +- See the **Size** column for the total bucket size and the **Objects** column for the total number of objects + +## Next Steps + +There are several next steps to consider after a successful object storage migration: + +- **Change over your object storage endpoints to your new target bucket.** For example, if you have backups or logs being sent to your old source bucket, edit your jobs to point to the new bucket endpoints. + +- **Edit your configurations to match your new object storage credentials.** In addition to changing the target endpoints for your workloads, your access keys and secret keys need to be updated to your new Linode Object Storage keys. + +- **Confirm the changeover is functioning as expected.** Allow some time to make sure your updated workloads and jobs are interacting successfully with Linode Object Storage. Once you confirm everything is working as expected, you can safely delete the original source bucket and its contents. 
+ +- **Take any additional steps to update your system for S3 compatibility.** You may need to make additional internal configuration changes to ensure your system is set up to communicate using S3 protocol. See Google’s documentation for [interoperability with other storage providers](https://cloud.google.com/storage/docs/interoperability). \ No newline at end of file diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/index.md b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/index.md deleted file mode 100644 index 01a38b04f1f..00000000000 --- a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/index.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -slug: migrate-to-linode-object-storage -title: "Migrate to Linode Object Storage" -title_meta: "How to Migrate to Linode Object Storage" -description: "Want to migrate from AWS S3 to Linode Object Storage? This tutorial covers the tools needed to copy and sync objects and buckets from Amazon to Linode." -authors: ["Nathaniel Stickman"] -contributors: ["Nathaniel Stickman"] -published: 2022-10-08 -keywords: ['amazon s3 migrate files','aws s3 migrate object','linode object storage'] -license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)' -external_resources: -- '[How-to Geek: How to Migrate an AWS S3 Bucket to Another Account or Service](https://www.howtogeek.com/devops/how-to-migrate-an-aws-s3-bucket-to-another-account-or-service/)' -- '[Google Cloud: Simple migration from Amazon S3 to Cloud Storage](https://cloud.google.com/storage/docs/aws-simple-migration#storage-list-buckets-s3-go)' -- '[IBM Cloud: Migrating from AWS](https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-migrate)' ---- - -[Linode Object Storage](https://www.linode.com/products/object-storage/) is S3-compatible. So it not only offers all the benefits of S3, but can also leverage common S3 tooling. This lets Linode Object Storage instances work with hyper-scale S3s like AWS and Google Cloud. - -This tutorial the tooling needed to make migration from AWS S3 to Linode Object Storage a smooth and straightforward process. It covers what you need to know before making the migration, then gives you two options depending on your needs: - -- How to use rclone to migrate one bucket at a time. - -- How to use a custom Python script to migrate all of your buckets at once. - -## Before You Begin - -1. Familiarize yourself with our [Getting Started with Linode](/docs/products/platform/get-started/) guide, and complete the steps for setting your Linode's hostname and timezone. - -1. This guide uses `sudo` wherever possible. Complete the sections of our [How to Secure Your Server](/docs/products/compute/compute-instances/guides/set-up-and-secure/) guide to create a standard user account, harden SSH access, and remove unnecessary network services. - -1. Update your system. - - - **Debian** and **Ubuntu**: - - sudo apt update && sudo apt upgrade - - - **AlmaLinux**, **CentOS Stream** (8 or later), **Fedora**, and **Rocky Linux**: - - sudo dnf upgrade -{{< note >}} -This guide is written for a non-root user. Commands that require elevated privileges are prefixed with `sudo`. If you’re not familiar with the `sudo` command, see the [Users and Groups](/docs/guides/linux-users-and-groups/) guide. -{{< /note >}} - -## How S3 Migration Works - -While popularized by AWS, S3 has become a widely used model for object storage. 
Because they share the same model, these S3-compatible object storage services can interact with the same tooling. - -Linode Object Storage is no different. For instance, you can fully operate your Linode buckets through the s3cmd tool commonly used for managing AWS S3 and other S3-compatible services. You can learn more about that in our guide [Using S3cmd with Object Storage](/docs/products/storage/object-storage/guides/s3cmd/). - -As such, most tools designed for working with S3 can be used seamlessly with either AWS S3 or Linode Object Storage. - -This includes the two tools used in this tutorial: - -- **rclone**: a popular tool for transferring data between storage systems. - -- **Boto3**: a Python library designed for managing AWS S3 instances. - -## What to Consider Before Migrating to Linode Object Storage - -Both migration processes in this tutorial require you to have access and secret keys for your AWS S3 and Linode Object Storage instances. - -- To learn about creating and managing access keys for Linode Object Storage, take a look at our guide [Manage Access Keys](/docs/products/storage/object-storage/guides/access-keys/) - -- For more on AWS access keys, take a look at the AWS [documentation on access keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html). Essentially, navigate to the **Security Credentials** page, scroll down, and select **Create access key**. - -Throughout the rest of this tutorial, and in its supplementary files, you need to substitute the placeholders for your own keys. This means replacing `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` with the access and secret keys, respectively, for your AWS S3 instance. Likewise, replace `LINODE_ACCESS_KEY` and `LINODE_SECRET_KEY` with your access and secret keys, respectively, for your Linode Object Storage instance. - -You also need to have the region name used for each instance as well: - -- **Linode Object Storage**: The region name for your bucket is provided in the endpoint URL. For instance, if your endpoint URL is `example-aws-bucket-1.us-southeast-1.linodeobjects.com`, the region name for your bucket is `us-southeast-1`. - -- **AWS S3**: The region name for your bucket is provided on the dashboard, within the listing of your buckets. - -This tutorial uses `us-east-2` as the placeholder for the AWS S3 region and `us-southeast-1` as the placeholder for the Linode Object Storage region. Replace both throughout with your own instances' regions. - -## How to Migrate a Bucket from AWS S3 to Linode Object Storage - -When migrating one or only a few buckets, *rclone* provides the smoothest process. Enter the credentials and connection details for your S3 instances, and you can migrate a bucket with a single command. - -These next few sections walk you through that process. They demonstrate how to set up rclone on your system, how to configure it, and the commands used to migrate buckets. - -### Setting Up the Prerequisites - -To get started, you need to install the rclone tool and connect it to both your AWS S3 and Linode Object Storage instances. - -1. Install rclone. rclone specializes in transferring files over SSH, but it also comes with full support for connecting to and transferring data over S3. 
- - - **Debian** and **Ubuntu**: - - sudo apt install rclone - - - **Fedora**: - - sudo dnf install rclone - - - **AlmaLinux**, **CentOS Stream**, and **Rocky Linux**: - - sudo dnf install epel-release - sudo dnf install rclone - - You can then verify your installation with: - - rclone version - - {{< output >}} -rclone v1.53.3-DEV -- os/arch: linux/amd64 -- go version: go1.18 - {{< /output >}} - -1. Create an rclone configuration file with the connection details for the AWS S3 and Linode Object Storage instances. The rclone configuration file, located at `~/.config/rclone/rclone.conf`, can hold multiple connection configurations. Here, the connections are named `awss3` and `linodes3`. - - Replace the `AWS_ACCESS_KEY`, `AWS_SECRET_KEY`, `LINODE_ACCESS_KEY`, and `LINODE_SECRET_KEY` with your instances' access and secret keys. Placeholder regions have been provided below — `us-east-2` for AWS and `us-southeast-1` for Linode. Be sure to replace these as well with your instances' actual region names. - - {{< file "~/.config/rclone/rclone.conf" >}} -[awss3] -type = s3 -env_auth = false -acl = private -access_key_id = AWS_ACCESS_KEY -secret_access_key = AWS_SECRET_KEY -region = us-east-2 -location_constraint = us-east-2 - -[linodes3] -type = s3 -env_auth = false -acl = private -access_key_id = LINODE_ACCESS_KEY -secret_access_key = LINODE_SECRET_KEY -region = us-southeast-1 -endpoint = us-southeast-1.linodeobjects.com -{{< /file >}} - -1. You can then verify your configuration by listing the remote storage sources for rclone: - - rclone listremotes --long - - {{< output >}} -awss3: s3 -linodes3: s3 - {{< /output >}} - - You can further verify the connections by checking object contents for buckets on the storage services. This command, for instance, lists the contents of the `example-aws-bucket-1` on the services configured under `awss3`: - - rclone tree awss3:example-aws-bucket-1 - - In this case, the AWS S3 bucket has two text files. - - {{< output >}} -/ -├── example-text-file-1.txt -└── example-text-file-2.txt - {{< /output >}} - -### Syncing Buckets - -rclone works by copying files from a storage source to a storage destination. Once you have a configuration like the one above, copying can be easily done with a command like the following. This example copies objects from an AWS S3 bucket named `example-aws-bucket-1` to a Linode Object Storage bucket named `example-linode-bucket-1`: - - rclone copy awss3:example-aws-bucket-1 linodes3:example-linode-bucket-1 -P - -{{< output >}} -Transferred: 177 / 177 Bytes, 100%, 468 Bytes/s, ETA 0s -Transferred: 2 / 2, 100% -Elapsed time: 0.5s -{{< /output >}} - -The `-P` option tells rclone to output the steps in the transfer process. You can also test out a given copy command by using the `--dry-run` option. - -Alternatively to the `copy` command, you can use the `sync` command. With `sync`, any files in the destination that are not in the source are deleted. In other words, this command has rclone make the contents of the destination bucket match the contents of the source bucket. The `sync` command should be used when you strictly need the destination to match the source. - -### Verifying the Results - -The simplest way to verify the results is through rclone itself. 
You can use a `tree` command like the one shown below: - - rclone tree linodes3:example-linode-bucket-1 - -{{< output >}} -/ -├── example-text-file-1.txt -└── example-text-file-2.txt -{{< /output >}} - -Alternatively, you can also check in the Linode Cloud Manager by navigating to your Object Storage instance and selecting the target bucket. - -![Objects reflected in Linode Object Storage bucket](linode-objects-bucket.png) - -## How to Migrate All Buckets from AWS S3 to Linode Object Storage - -The approach covered above works well when you need to migrate a few buckets. But it quickly becomes unrealistic when you have numerous buckets you need to migrate from AWS to Linode. - -To address this, the following sections walk you through using a custom Python script for migrating AWS S3 buckets to a Linode Object Storage instance. - -The script uses Boto3, Amazon's Python SDK for interacting with and managing AWS S3 buckets. The SDK can readily interface with many other S3-compatible services, including Linode Object Storage. - -### Setting Up the Prerequisites - -This process uses Python 3 with the Boto3 library to connect to and operate the AWS S3 and Linode Object Storage buckets. To get this working, you also need to provide credentials for connecting to each of your instances. - -Follow the steps here to get the prerequisite software you need and find links to download the migration script and its configuration file. - -1. Ensure that you have Python 3 and Pip 3 installed. You can find instructions for installing these on your system in the [Install Python 3 and pip3](/docs/products/tools/cli/guides/install/#install-python-3-and-pip3) section of our guide on installing the Linode CLI. - -1. Install the Boto3 Python library via Pip 3: - - pip3 install boto3 - -1. Download the configuration file for the migration script [here](s3_migration.conf). Then, modify the configurations to match your AWS and Linode instances' credentials and regions. - - Note that the `endpoint_url` value needs to have the `http`/`https` prefix and should be the Linode endpoint excluding the bucket portion of the URL. - -1. Finally, download the migration script [here](s3_migration.py). - -### Understanding the Script - -The script downloaded above should already cover most use cases for migrating from an AWS S3 instance to a Linode Object Storage instance. Nevertheless, you may want familiarize yourself with the script and make adjustments to fit your particular needs. - -To help make navigating and reviewing the script easier, here is a rough diagram of its operations. The diagram does not represent a one-to-one outline of the script. Instead, its purpose is to clarify the script's organization and order of operations. - -![Rough diagram of the migration script](s3-migration-script-model.png) - -### Running the Script - -When you are ready, you can run the script with the following Python command: - - python3 s3_migration.py - -The output indicates the script's progress and provides alerts if any errors are encountered along the way. - -### Verifying the Results - -You can verify the script's success in the same manner as shown in the section on rclone above. Probably the most accessible method here is navigating to your the Linode Cloud Manager and taking a look at your Object Storage instance. There, you should see the buckets from your AWS S3 instance and, within them, the objects that have been migrated. 
- -![Objects migrated from AWS and reflected in a new Linode Object Storage bucket](linode-aws-objects-bucket.png) - -## Conclusion - -This tutorial has covered the tools you need to migrate from an AWS S3 instance to a Linode Object Storage instance. You can readily migrate one or even a few buckets with a straightforward rclone setup. Or you can use our custom script to migrate all of your buckets from one instance to another. \ No newline at end of file diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/linode-aws-objects-bucket.png b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/linode-aws-objects-bucket.png deleted file mode 100644 index b73b3294237..00000000000 Binary files a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/linode-aws-objects-bucket.png and /dev/null differ diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/linode-objects-bucket.png b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/linode-objects-bucket.png deleted file mode 100644 index f4cd7183226..00000000000 Binary files a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/linode-objects-bucket.png and /dev/null differ diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3-migration-script-model.png b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3-migration-script-model.png deleted file mode 100644 index a93f21a8f2c..00000000000 Binary files a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3-migration-script-model.png and /dev/null differ diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3-migration-script-model.uml b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3-migration-script-model.uml deleted file mode 100644 index 971e31912a6..00000000000 --- a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3-migration-script-model.uml +++ /dev/null @@ -1,32 +0,0 @@ -@startuml -!theme plain - -s3_migration -> boto3 : client(aws_conf) -s3_migration <-- boto3 : aws_client -s3_migration -> boto3 : client(linode_conf) -s3_migration <-- boto3 : linode_client - -s3_migration -> s3_migration : initialize_temporary_storage() - -s3_migration -> aws_client : list_buckets() -s3_migration <-- aws_client : aws_buckets -s3_migration -> linode_client : list_buckets() -s3_migration <-- linode_client : linode_buckets - -loop bucket in aws_buckets - alt bucket not in linode_buckets - s3_migration -> linode_client : create_bucket(bucket) - end -end - -loop bucket in aws_buckets - loop object in bucket - s3_migration -> aws_client : download_file(bucket, object) - s3_migration <-- aws_client : object_file - s3_migration -> linode_client : upload_file(bucket, object_file) - end -end - -s3_migration -> s3_migration : remove_temporary_storage() -@enduml - diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3_migration.conf b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3_migration.conf deleted file mode 100644 index 2f89acd3188..00000000000 --- a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3_migration.conf +++ /dev/null @@ -1,11 +0,0 @@ -[awss3] -aws_access_key_id = AWS_ACCESS_KEY -aws_secret_access_key = AWS_SECRET_KEY -region_name = us-east-2 - -[linodes3] -aws_access_key_id = LINODE_ACCESS_KEY -aws_secret_access_key = LINODE_SECRET_KEY -region_name = us-southeast-1 
-endpoint_url = https://us-southeast-1.linodeobjects.com - diff --git a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3_migration.py b/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3_migration.py deleted file mode 100644 index 7e75fd94d8f..00000000000 --- a/docs/guides/platform/object-storage/migrate-to-linode-object-storage/s3_migration.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import re -import shutil - -import configparser - -import boto3 -from botocore.exceptions import ClientError - -CONFIG_FILE_LOCATION = "s3_migration.conf" -TEMP_STORAGE_DIRECTORY = "temp_storage/" - -def initialize_temporary_storage(): - if not os.path.exists(TEMP_STORAGE_DIRECTORY): - os.mkdir(TEMP_STORAGE_DIRECTORY) - else: - print("\nAbort: Temporary storage directory " + TEMP_STORAGE_DIRECTORY + " already exists. Remove it or specify a different temporary storage directory.") - exit() - -def remove_temporary_storage(): - shutil.rmtree(TEMP_STORAGE_DIRECTORY) - -def create_s3_client(instance_designation): - config = configparser.ConfigParser() - config.read(CONFIG_FILE_LOCATION) - - if instance_designation not in config: - print("\nAbort: Missing configration entry for " + instance_designation + ".") - exit() - - instance_config = config[instance_designation] - - if "endpoint_url" in instance_config: - return boto3.client( - service_name = "s3", - aws_access_key_id = instance_config["aws_access_key_id"], - aws_secret_access_key = instance_config["aws_secret_access_key"], - region_name = instance_config["region_name"], - endpoint_url = instance_config["endpoint_url"], - use_ssl = True - ) - else: - return boto3.client( - service_name = "s3", - aws_access_key_id = instance_config["aws_access_key_id"], - aws_secret_access_key = instance_config["aws_secret_access_key"], - region_name = instance_config["region_name"], - use_ssl = True - ) - -def get_bucket_list(client): - buckets = client.list_buckets() - return buckets["Buckets"] - -def has_matching_buckets(source_bucket, destination_bucket_list): - matching_buckets = [x for x in destination_bucket_list if x["Name"] == source_bucket] - return len(matching_buckets) > 0 - -def create_bucket(client, bucket_name): - try: - client.create_bucket(Bucket=bucket_name) - except ClientError as e: - return False - return True - -def copy_bucket_objects(source_client, destination_client, bucket_name): - source_bucket_objects = source_client.list_objects(Bucket=bucket_name)["Contents"] - - for source_object in source_bucket_objects: - source_object_name = source_object["Key"] - - if re.search(r'/$', source_object_name): - os.mkdir(TEMP_STORAGE_DIRECTORY + source_object_name) - else: - source_client.download_file(bucket_name, source_object_name, TEMP_STORAGE_DIRECTORY + source_object_name) - - print("\tTranferring " + source_object_name + " to destination.") - try: - destination_client.upload_file(TEMP_STORAGE_DIRECTORY + source_object_name, bucket_name, source_object_name) - except ClientError as e: - print("\t\tError occurred while uploading " + source_object_name + ".") - -def create_matching_buckets(source_client, destination_client): - source_bucket_list = get_bucket_list(source_client) - destination_bucket_list = get_bucket_list(destination_client) - - for bucket in source_bucket_list: - if not has_matching_buckets(bucket["Name"], destination_bucket_list): - print("\tCreating " + bucket["Name"] + " on destination.") - bucket_creation_result = create_bucket(destination_client, bucket["Name"]) - print("\t\t" + ("Success" if 
bucket_creation_result else "Failed")) - -def copy_instance_objects(source_client, destination_client): - source_bucket_list = get_bucket_list(source_client) - destination_bucket_list = get_bucket_list(destination_client) - - for bucket in source_bucket_list: - if has_matching_buckets(bucket["Name"], destination_bucket_list): - copy_bucket_objects(source_client, destination_client, bucket["Name"]) - else: - print("\tDestination does not have " + bucket["Name"] + ". Create matching buckets first to copy objects to.") - -def main(): - aws_client = create_s3_client("awss3") - linode_client = create_s3_client("linodes3") - - print("Initiating migration from AWS S3 to Linode Object Storage.") - - print("\nCreating a temporary storage directory.") - initialize_temporary_storage() - - print("\nChecking for matching buckets on Linode.") - create_matching_buckets(aws_client, linode_client) - - print("\nCopying objects from AWS to Linode.") - copy_instance_objects(aws_client, linode_client) - - print("\nCleaning up temporary storage.") - remove_temporary_storage() - -if __name__ == '__main__': - main() -