diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 4b23a13f1b1b..e7e14be753ae 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -3090,6 +3090,11 @@ Topics: Topics: - Name: Backing up applications on ROSA STS using OADP File: oadp-rosa-backing-up-applications + - Name: OADP and AWS STS + Dir: aws-sts + Topics: + - Name: Backing up applications on AWS STS using OADP + File: oadp-aws-sts - Name: OADP 1.2 Data Mover Dir: installing Topics: diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/_attributes b/backup_and_restore/application_backup_and_restore/aws-sts/_attributes new file mode 120000 index 000000000000..bf7c2529fdb4 --- /dev/null +++ b/backup_and_restore/application_backup_and_restore/aws-sts/_attributes @@ -0,0 +1 @@ +../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/images b/backup_and_restore/application_backup_and_restore/aws-sts/images new file mode 120000 index 000000000000..4399cbb3c0f3 --- /dev/null +++ b/backup_and_restore/application_backup_and_restore/aws-sts/images @@ -0,0 +1 @@ +../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/modules b/backup_and_restore/application_backup_and_restore/aws-sts/modules new file mode 120000 index 000000000000..5be29a99c161 --- /dev/null +++ b/backup_and_restore/application_backup_and_restore/aws-sts/modules @@ -0,0 +1 @@ +../../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc new file mode 100644 index 000000000000..46980d402da0 --- /dev/null +++ b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc @@ -0,0 +1,43 @@ +:_mod-docs-content-type: ASSEMBLY +[id="oadp-aws-sts"] += Backing up applications on AWS STS using OADP 
+include::_attributes/common-attributes.adoc[] +:context: oadp-aws-sts-backing-up-applications + +toc::[] + +You install the OpenShift API for Data Protection (OADP) with Amazon Web Services (AWS) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. + +include::snippets/oadp-mtc-operator.adoc[] + +You configure {aws-short} for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../..//backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. + +To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. + +You can install OADP on an AWS Security Token Service (AWS STS) cluster manually. Amazon AWS provides AWS Security Token Service (AWS STS) as a web service that enables you to request temporary, limited-privilege credentials for users. You use STS to provide trusted users with temporary access to resources via API calls, your AWS console or the AWS command line interface (CLI). + +Before installing {oadp-first}, you must set up role and policy credentials for OADP so that it can use the {aws-full} API. + +This process is performed in the following two stages: + +. Prepare {aws-short} credentials +. 
Install the OADP Operator and give it an IAM role + +include::modules/preparing-aws-sts-credentials-for-oadp.adoc[leveloffset=+1] + +include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] + +include::modules/installing-oadp-aws-sts.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console]. +* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backing up applications] + +[id="oadp-aws-sts-backing-up-and-cleaning"] +== Example: Backing up workload on OADP AWS STS, with an optional cleanup + +include::modules/performing-a-backup-oadp-aws-sts.adoc[leveloffset=+2] + +include::modules/cleanup-a-backup-oadp-aws-sts.adoc[leveloffset=+2] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/snippets b/backup_and_restore/application_backup_and_restore/aws-sts/snippets new file mode 120000 index 000000000000..ce62fd7c41e2 --- /dev/null +++ b/backup_and_restore/application_backup_and_restore/aws-sts/snippets @@ -0,0 +1 @@ +../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc index 40ef7ce14217..c14470b45362 100644 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc +++ b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc @@ -32,8 +32,8 @@ include::modules/installing-oadp-rosa-sts.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* 
link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/operators/user-tasks#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console]. -* link:https://docs.openshift.com/container-platform/4.14/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.html[Backing up applications] +* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console]. +* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backing up applications] [id="oadp-rosa-backing-up-and-cleaning"] == Example: Backing up workload on OADP ROSA STS, with an optional cleanup diff --git a/modules/cleanup-a-backup-oadp-aws-sts.adoc b/modules/cleanup-a-backup-oadp-aws-sts.adoc new file mode 100644 index 000000000000..7cba8fa822f6 --- /dev/null +++ b/modules/cleanup-a-backup-oadp-aws-sts.adoc @@ -0,0 +1,104 @@ +// Module included in the following assemblies: +// +// * backup_and_restore/application_backup_and_restore/oadp-aws-sts/oadp-aws-sts.adoc + +:_mod-docs-content-type: PROCEDURE +[id="cleanup-a-backup-oadp-aws-sts_{context}"] += Cleaning up a cluster after a backup with OADP and AWS STS + +If you need to uninstall the {oadp-first} Operator together with the backups and the S3 bucket from this example, follow these instructions. + +.Procedure + +. Delete the workload by running the following command: ++ +[source,terminal] +---- +$ oc delete ns hello-world +---- + +. Delete the Data Protection Application (DPA) by running the following command: ++ +[source,terminal] +---- +$ oc -n openshift-adp delete dpa ${CLUSTER_NAME}-dpa +---- + +. 
Delete the cloud storage by running the following command:
++
+[source,terminal]
+----
+$ oc -n openshift-adp delete cloudstorage ${CLUSTER_NAME}-oadp
+----
+
++
+[WARNING]
+====
+If this command hangs, you might need to delete the finalizer by running the following command:
+
+[source,terminal]
+----
+$ oc -n openshift-adp patch cloudstorage ${CLUSTER_NAME}-oadp -p '{"metadata":{"finalizers":null}}' --type=merge
+----
+====
+
+. If the Operator is no longer required, remove it by running the following command:
++
+[source,terminal]
+----
+$ oc -n openshift-adp delete subscription oadp-operator
+----
+
+. Remove the `openshift-adp` namespace from the cluster:
++
+[source,terminal]
+----
+$ oc delete ns openshift-adp
+----
+
+. If the backup and restore resources are no longer required, remove them from the cluster by running the following command:
++
+[source,terminal]
+----
+$ oc delete backup hello-world
+----
+
+. To delete the backup, restore, and remote objects in {aws-short} S3, run the following command:
++
+[source,terminal]
+----
+$ velero backup delete hello-world
+----
+
+. If you no longer need the Custom Resource Definitions (CRD), remove them from the cluster by running the following command:
++
+[source,terminal]
+----
+$ for CRD in `oc get crds | grep velero | awk '{print $1}'`; do oc delete crd $CRD; done
+----
+
+. Delete the {aws-short} S3 bucket by running the following commands:
++
+[source,terminal]
+----
+$ aws s3 rm s3://${CLUSTER_NAME}-oadp --recursive
+----
++
+[source,terminal]
+----
+$ aws s3api delete-bucket --bucket ${CLUSTER_NAME}-oadp
+----
+
+. Detach the policy from the role by running the following command:
++
+[source,terminal]
+----
+$ aws iam detach-role-policy --role-name "${ROLE_NAME}" --policy-arn "${POLICY_ARN}"
+----
+
+. 
Delete the role by running the following command: ++ +[source,terminal] +---- +$ aws iam delete-role --role-name "${ROLE_NAME}" +---- diff --git a/modules/installing-oadp-aws-sts.adoc b/modules/installing-oadp-aws-sts.adoc new file mode 100644 index 000000000000..09c14b563fe7 --- /dev/null +++ b/modules/installing-oadp-aws-sts.adoc @@ -0,0 +1,260 @@ +// Module included in the following assemblies: +// +// * backup_and_restore/application_backup_and_restore/oadp-aws-sts/oadp-aws-sts.adoc + +:_mod-docs-content-type: PROCEDURE +[id="installing-oadp-aws-sts_{context}"] += Installing the OADP Operator and providing the IAM role + +AWS Security Token Service (AWS STS) is a global web service that provides short-term credentials for IAM or federated users. This document describes how to install {oadp-first} on an {aws-short} {sts-short} cluster manually. + +[IMPORTANT] +==== +Restic and Kopia are not supported in the OADP {aws-short} {sts-short} environment. Verify that the Restic and Kopia node agent is disabled. +For backing up volumes, OADP on {aws-short} {sts-short} supports only native snapshots and Container Storage Interface (CSI) snapshots. +==== + +[IMPORTANT] +==== +In an Amazon {aws-short} cluster that uses STS authentication, restoring backed-up data in a different {aws-short} region is not supported. + +The Data Mover feature is not currently supported in {aws-short} {sts-short} clusters. You can use native {aws-short} S3 tools for moving data. +==== + +.Prerequisites + +* An {product-title} {aws-short} {sts-short} cluster with the required access and tokens. For instructions, see the previous procedure _Preparing AWS credentials for OADP_. If you plan to use two different clusters for backing up and restoring, you must prepare {aws-short} credentials, including `ROLE_ARN`, for each cluster. + +.Procedure + +. Create an {product-title} secret from your {aws-short} token file by entering the following commands: + +.. 
Create the credentials file:
++
+[source,terminal]
+----
+$ cat <<EOF > ${SCRATCH}/credentials
+  [default]
+  role_arn = ${ROLE_ARN}
+  web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
+EOF
+----
+
+.. Create a namespace for OADP:
++
+[source,terminal]
+----
+$ oc create namespace openshift-adp
+----
+
+.. Create the {product-title} secret:
++
+[source,terminal]
+----
+$ oc -n openshift-adp create secret generic cloud-credentials \
+  --from-file=${SCRATCH}/credentials
+----
++
+[NOTE]
+====
+In {product-title} versions 4.14 and later, the OADP Operator supports a new standardized {sts-short} workflow through the Operator Lifecycle Manager (OLM)
+and Cloud Credentials Operator (CCO). In this workflow, you do not need to create the above
+secret; you only need to supply the role ARN during the installation of OLM-managed operators by using the {product-title} web console. For more information, see _Installing from OperatorHub using the web console_.
+
+The preceding secret is created automatically by CCO.
+====
+
+. Install the OADP Operator:
+.. In the {product-title} web console, browse to *Operators* -> *OperatorHub*.
+.. Search for the *OADP Operator*.
+.. In the *role_ARN* field, paste the role_arn that you created previously and click *Install*.
+
+. Create {aws-short} cloud storage using your {aws-short} credentials by entering the following command:
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+  apiVersion: oadp.openshift.io/v1alpha1
+  kind: CloudStorage
+  metadata:
+    name: ${CLUSTER_NAME}-oadp
+    namespace: openshift-adp
+  spec:
+    creationSecret:
+      key: credentials
+      name: cloud-credentials
+    enableSharedConfig: true
+    name: ${CLUSTER_NAME}-oadp
+    provider: aws
+    region: $REGION
+EOF
+----
+// bringing over from MOB docs
+. 
Check your application's default storage class by entering the following command:
++
+[source,terminal]
+----
+$ oc get pvc -n <application-namespace>
+----
+
++
+.Example output
+
++
+[source,terminal]
+----
+NAME     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+applog   Bound    pvc-351791ae-b6ab-4e8b-88a4-30f73caf5ef8   1Gi        RWO            gp3-csi        4d19h
+mysql    Bound    pvc-16b8e009-a20a-4379-accc-bc81fedd0621   1Gi        RWO            gp3-csi        4d19h
+----
+
+
+. Get the storage class by running the following command:
++
+[source,terminal]
+----
+$ oc get storageclass
+----
+
++
+.Example output
++
+[source,terminal]
+----
+NAME                PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+gp2                 kubernetes.io/aws-ebs   Delete          WaitForFirstConsumer   true                   4d21h
+gp2-csi             ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
+gp3                 ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
+gp3-csi (default)   ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
+----
++
+[NOTE]
+====
+The following storage classes will work:
+
+ * gp3-csi
+ * gp2-csi
+ * gp3
+ * gp2
+====
++
+If the application or applications that are being backed up are all using persistent volumes (PVs) with Container Storage Interface (CSI), it is advisable to include the CSI plugin in the OADP DPA configuration.
+
+. Create the `DataProtectionApplication` resource to configure the connection to the storage where the backups and volume snapshots are stored:
+
+.. 
If you are using only CSI volumes, deploy a Data Protection Application by entering the following command: ++ +[source,terminal] +---- +$ cat << EOF | oc create -f - + apiVersion: oadp.openshift.io/v1alpha1 + kind: DataProtectionApplication + metadata: + name: ${CLUSTER_NAME}-dpa + namespace: openshift-adp + spec: + backupImages: true <1> + features: + dataMover: + enable: false + backupLocations: + - bucket: + cloudStorageRef: + name: ${CLUSTER_NAME}-oadp + credential: + key: credentials + name: cloud-credentials + prefix: velero + default: true + config: + region: ${REGION} + configuration: + velero: + defaultPlugins: + - openshift + - aws + - csi + restic: + enable: false +EOF +---- +<1> Set this field to `false` if you do not want to use image backup. + +// . Create the `DataProtectionApplication` resource, which is used to configure the connection to the storage where the backups and volume snapshots are stored: + +.. If you are using CSI or non-CSI volumes, deploy a Data Protection Application by entering the following command: ++ +[source,terminal] +---- +$ cat << EOF | oc create -f - + apiVersion: oadp.openshift.io/v1alpha1 + kind: DataProtectionApplication + metadata: + name: ${CLUSTER_NAME}-dpa + namespace: openshift-adp + spec: + backupImages: true <1> + features: + dataMover: + enable: false + backupLocations: + - bucket: + cloudStorageRef: + name: ${CLUSTER_NAME}-oadp + credential: + key: credentials + name: cloud-credentials + prefix: velero + default: true + config: + region: ${REGION} + configuration: + velero: + defaultPlugins: + - openshift + - aws + nodeAgent: <2> + enable: false + uploaderType: restic + snapshotLocations: + - velero: + config: + credentialsFile: /tmp/credentials/openshift-adp/cloud-credentials-credentials <3> + enableSharedConfig: "true" <4> + profile: default <5> + region: ${REGION} <6> + provider: aws +EOF +---- +<1> Set this field to false if you do not want to use image backup. +<2> See the following note. 
+<3> The `credentialsFile` field is the mounted location of the bucket credential on the pod. +<4> The `enableSharedConfig` field allows the `snapshotLocations` to share or reuse the credential defined for the bucket. +<5> Use the profile name set in the {aws-short} credentials file. +<6> Specify `region` as your {aws-short} region. This must be the same as the cluster region. ++ +You are now ready to back up and restore {product-title} applications, as described in _Backing up applications_. + +[NOTE] +==== +If you use OADP 1.2, replace this configuration: + +[source,terminal] +---- +nodeAgent: + enable: false + uploaderType: restic +---- +with the following configuration: + +[source,terminal] +---- +restic: + enable: false +---- +==== + +[NOTE] +==== +If you want to use two different clusters for backing up and restoring, the two clusters must have the same {aws-short} S3 storage names in both the cloud storage CR and the OADP `DataProtectionApplication` configuration. +==== diff --git a/modules/oadp-setting-resource-limits-and-requests.adoc b/modules/oadp-setting-resource-limits-and-requests.adoc index 17c366b88d56..c363f7464eae 100644 --- a/modules/oadp-setting-resource-limits-and-requests.adoc +++ b/modules/oadp-setting-resource-limits-and-requests.adoc @@ -2,6 +2,7 @@ // // * backup_and_restore/application_backup_and_restore/configuring-oadp.adoc // * virt/backup_restore/virt-installing-configuring-oadp.adoc +// * backup_and_restore/application_backup_and_restore/oadp-aws-sts/oadp-aws-sts.adoc :_mod-docs-content-type: PROCEDURE [id="oadp-setting-resource-limits-and-requests_{context}"] diff --git a/modules/performing-a-backup-oadp-aws-sts.adoc b/modules/performing-a-backup-oadp-aws-sts.adoc new file mode 100644 index 000000000000..fd6fc36a5a94 --- /dev/null +++ b/modules/performing-a-backup-oadp-aws-sts.adoc @@ -0,0 +1,166 @@ +// Module included in the following assemblies: +// +// * 
backup_and_restore/application_backup_and_restore/oadp-aws-sts/oadp-aws-sts.adoc + +:_mod-docs-content-type: PROCEDURE +[id="performing-a-backup-oadp-aws-sts_{context}"] += Performing a backup with OADP and AWS STS + +The following example `hello-world` application has no persistent volumes (PVs) attached. Perform a backup with {oadp-first} with AWS Security Token Service (AWS STS). + +Either Data Protection Application (DPA) configuration will work. + +. Create a workload to back up by running the following commands: ++ +[source,terminal] +---- +$ oc create namespace hello-world +---- ++ +[source,terminal] +---- +$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift +---- + +. Expose the route by running the following command: ++ +[source,terminal] +---- +$ oc expose service/hello-openshift -n hello-world +---- + +. Check that the application is working by running the following command: ++ +[source,terminal] +---- +$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'` +---- ++ +.Example output +[source,terminal] +---- +Hello OpenShift! +---- + + +. Back up the workload by running the following command: ++ +[source,terminal] +---- +$ cat << EOF | oc create -f - + apiVersion: velero.io/v1 + kind: Backup + metadata: + name: hello-world + namespace: openshift-adp + spec: + includedNamespaces: + - hello-world + storageLocation: ${CLUSTER_NAME}-dpa-1 + ttl: 720h0m0s +EOF +---- + +. Wait until the backup is completed and then run the following command: ++ +[source,terminal] +---- +$ watch "oc -n openshift-adp get backup hello-world -o json | jq .status" +---- ++ +.Example output ++ +[source,json] +---- +{ + "completionTimestamp": "2022-09-07T22:20:44Z", + "expiration": "2022-10-07T22:20:22Z", + "formatVersion": "1.1.0", + "phase": "Completed", + "progress": { + "itemsBackedUp": 58, + "totalItems": 58 + }, + "startTimestamp": "2022-09-07T22:20:22Z", + "version": 1 +} +---- + +. 
Delete the demo workload by running the following command: ++ +[source,terminal] +---- +$ oc delete ns hello-world +---- + +. Restore the workload from the backup by running the following command: ++ +[source,terminal] +---- +$ cat << EOF | oc create -f - + apiVersion: velero.io/v1 + kind: Restore + metadata: + name: hello-world + namespace: openshift-adp + spec: + backupName: hello-world +EOF +---- + +. Wait for the Restore to finish by running the following command: ++ +[source,terminal] +---- +$ watch "oc -n openshift-adp get restore hello-world -o json | jq .status" +---- ++ +.Example output ++ +[source,json] +---- +{ + "completionTimestamp": "2022-09-07T22:25:47Z", + "phase": "Completed", + "progress": { + "itemsRestored": 38, + "totalItems": 38 + }, + "startTimestamp": "2022-09-07T22:25:28Z", + "warnings": 9 +} +---- + +. Check that the workload is restored by running the following command: ++ +[source,terminal] +---- +$ oc -n hello-world get pods +---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +hello-openshift-9f885f7c6-kdjpj 1/1 Running 0 90s +---- +. Check the JSONPath by running the following command: ++ +[source,terminal] +---- +$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'` +---- ++ +.Example output ++ +[source,terminal] +---- +Hello OpenShift! +---- + +[NOTE] +==== +For troubleshooting tips, see the OADP team’s link:https://access.redhat.com/articles/5456281[troubleshooting documentation]. 
+==== + diff --git a/modules/preparing-aws-sts-credentials-for-oadp.adoc b/modules/preparing-aws-sts-credentials-for-oadp.adoc new file mode 100644 index 000000000000..ba3860eb9e08 --- /dev/null +++ b/modules/preparing-aws-sts-credentials-for-oadp.adoc @@ -0,0 +1,177 @@ +// Module included in the following assemblies: +// +// * backup_and_restore/application_backup_and_restore/oadp-aws-sts/oadp-aws-sts.adoc + +:_mod-docs-content-type: PROCEDURE +[id="preparing-aws-sts-credentials-for-oadp_{context}"] += Preparing AWS STS credentials for OADP + +An {aws-full} account must be prepared and configured to accept an {oadp-first} installation. Prepare the {aws-short} credentials by following the proceeding steps. + +.Procedure +. Define the `cluster_name` environment variable by running the following command: ++ +[source,terminal] +---- +$ export CLUSTER_NAME= <1> +---- +<1> The variable can be set to any value. +. Retrieve all the details of the `cluster` such as the `AWS_ACCOUNT_ID, OIDC_ENDPOINT` by running the following command: ++ +[source,terminal] +---- +$ export CLUSTER_VERSION=$(oc get clusterversion version -o jsonpath='{.status.desired.version}{"\n"}') + +export AWS_CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}{"\n"}') + +export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||') + +export REGION=$(oc get infrastructures cluster -o jsonpath='{.status.platformStatus.aws.region}' --allow-missing-template-keys=false || echo us-east-2) + +export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials" +---- +. Create a temporary directory to store all the files by running the following command: ++ +[source,terminal] +---- +$ export SCRATCH="/tmp/${CLUSTER_NAME}/oadp" +mkdir -p ${SCRATCH} +---- +. 
Display all the gathered details by running the following command: ++ +[source,terminal] +---- +$ echo "Cluster ID: ${AWS_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint: +${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}" +---- +. On the {aws-short} account, create an IAM policy to allow access to {aws-short} S3: + +.. Check to see if the policy exists by running the following commands: ++ +[source,terminal] +---- +$ export POLICY_NAME="OadpVer1" <1> +---- +<1> The variable can be set to any value. ++ +[source,terminal] +---- +$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='$POLICY_NAME'].{ARN:Arn}" --output text) +---- +.. Enter the following command to create the policy JSON file and then create the policy: ++ +[NOTE] +==== +If the policy ARN is not found, the command creates the policy. If the policy ARN already exists, the `if` statement intentionally skips the policy creation. +==== ++ +[source,terminal] +---- +$ if [[ -z "${POLICY_ARN}" ]]; then +cat << EOF > ${SCRATCH}/policy.json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:PutBucketTagging", + "s3:GetBucketTagging", + "s3:PutEncryptionConfiguration", + "s3:GetEncryptionConfiguration", + "s3:PutLifecycleConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucketMultipartUploads", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts", + "ec2:DescribeSnapshots", + "ec2:DescribeVolumes", + "ec2:DescribeVolumeAttribute", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVolumeStatus", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot" + ], + "Resource": "*" + } +]} +EOF + +POLICY_ARN=$(aws iam create-policy --policy-name $POLICY_NAME \ +--policy-document file:///${SCRATCH}/policy.json --query Policy.Arn \ +--tags 
Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp \
+--output text) <1>
+fi
+----
++
+<1> `SCRATCH` is a name for a temporary directory created for storing the files.
+
+.. View the policy ARN by running the following command:
++
+[source,terminal]
+----
+$ echo ${POLICY_ARN}
+----
+
+. Create an IAM role trust policy for the cluster:
+
+.. Create the trust policy file by running the following command:
++
+[source,terminal]
+----
+$ cat <<EOF > ${SCRATCH}/trust-policy.json
+{
+    "Version": "2012-10-17",
+    "Statement": [{
+        "Effect": "Allow",
+        "Principal": {
+            "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}"
+        },
+        "Action": "sts:AssumeRoleWithWebIdentity",
+        "Condition": {
+            "StringEquals": {
+               "${OIDC_ENDPOINT}:sub": [
+                  "system:serviceaccount:openshift-adp:openshift-adp-controller-manager",
+                  "system:serviceaccount:openshift-adp:velero"]
+            }
+        }
+    }]
+}
+EOF
+----
+
+.. Create the IAM role for the cluster by running the following command:
++
+[source,terminal]
+----
+$ ROLE_ARN=$(aws iam create-role --role-name \
+  "${ROLE_NAME}" \
+  --assume-role-policy-document file://${SCRATCH}/trust-policy.json \
+  --tags Key=cluster_id,Value=${AWS_CLUSTER_ID} Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp --query Role.Arn --output text)
+----
+
+.. View the role ARN by running the following command:
++
+[source,terminal]
+----
+$ echo ${ROLE_ARN}
+----
+
+. Attach the IAM policy to the IAM role by running the following command:
++
+[source,terminal]
+----
+$ aws iam attach-role-policy --role-name "${ROLE_NAME}" --policy-arn ${POLICY_ARN}
+----
+
+
diff --git a/modules/setting-resource-limits-and-requests.adoc b/modules/setting-resource-limits-and-requests.adoc
new file mode 100644
index 000000000000..e69de29bb2d1