From a9e843eeeb1027b6827557cfa68a20b90feec7b9 Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Fri, 19 Sep 2025 15:21:56 -0400
Subject: [PATCH 01/39] docs: Nested AWS into `Authenticating to the Cloud`

---
 src/redirects.js | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/redirects.js b/src/redirects.js
index f2b86849a2..b716a4f55d 100644
--- a/src/redirects.js
+++ b/src/redirects.js
@@ -113,6 +113,10 @@ export const redirects = [
     to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
     from: "/foundations/pipelines/aws-authentication"
   },
+  {
+    to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
+    from: "/2.0/docs/pipelines/concepts/cloud-auth"
+  },
   {
     to: "/2.0/docs/pipelines/concepts/overview",
     from: "/foundations/pipelines/"

From ee443a315607c9ba556f79f66107502709501755 Mon Sep 17 00:00:00 2001
From: Josh Padnick
Date: Mon, 22 Sep 2025 16:13:58 -0700
Subject: [PATCH 02/39] Fix build issues.

---
 src/redirects.js | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/redirects.js b/src/redirects.js
index b716a4f55d..f2b86849a2 100644
--- a/src/redirects.js
+++ b/src/redirects.js
@@ -113,10 +113,6 @@ export const redirects = [
     to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
     from: "/foundations/pipelines/aws-authentication"
   },
-  {
-    to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
-    from: "/2.0/docs/pipelines/concepts/cloud-auth"
-  },
   {
     to: "/2.0/docs/pipelines/concepts/overview",
     from: "/foundations/pipelines/"

From dc4dd6918b219ac7ca802a5248addc36735fcde4 Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Thu, 25 Sep 2025 10:23:51 -0400
Subject: [PATCH 03/39] fix: Addressing markdown lints

---
 docs/2.0/docs/pipelines/architecture/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/2.0/docs/pipelines/architecture/index.md b/docs/2.0/docs/pipelines/architecture/index.md
index e75a7905b7..da3951ad39 100644
--- a/docs/2.0/docs/pipelines/architecture/index.md
+++ b/docs/2.0/docs/pipelines/architecture/index.md
@@ -8,7 +8,7 @@ Outside of the main binary, Pipelines has several other components that work tog
 
 By design, customers run the binary as part of their CI/CD pipelines (e.g. GitHub Actions, GitLab CI, etc.). As such, Gruntwork provides out-of-the-box CI/CD configurations for supported platforms when customers sign up for Gruntwork Pipelines.
 
-We likewise provide CI/CD configurations for [Gruntwork Account Factory](https://docs.gruntwork.io/account-factory/overview). 
+We likewise provide CI/CD configurations for [Gruntwork Account Factory](https://docs.gruntwork.io/account-factory/overview).
 
 When using Gruntwork Pipelines without Gruntwork Account Factory, customers are responsible for configuring their repositories to use the appropriate CI/CD configuration for that platform (see [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo) for more information). This code is typically fairly minimal, and the majority of the work is done by reusable workflows made available by Gruntwork, and the binary itself.
 
From cadaf97006179febaaa0baf37a12000c57d6d380 Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Thu, 25 Sep 2025 16:28:42 -0400
Subject: [PATCH 04/39] fix: Reworked components page into execution flow page

---
 .../pipelines/architecture/execution-flow.md |   2 +-
 .../architecture/security-controls.md        | 134 ++++++++++++++++++
 2 files changed, 135 insertions(+), 1 deletion(-)

diff --git a/docs/2.0/docs/pipelines/architecture/execution-flow.md b/docs/2.0/docs/pipelines/architecture/execution-flow.md
index b7cbbbc47b..dbaa9f1326 100644
--- a/docs/2.0/docs/pipelines/architecture/execution-flow.md
+++ b/docs/2.0/docs/pipelines/architecture/execution-flow.md
@@ -10,6 +10,6 @@ The orchestrator analyzes each infrastructure change in a pull request or git co
 
 ## Executor
 
-The executor receives as inputs a pipeline action (e.g. `terragrunt plan`) and a specific unit of infrastructure that has been changed (e.g. `/path/to/changed-unit/terragrunt.hcl`) and executes the specified action on the specified unit. 
+The executor receives as inputs a pipeline action (e.g. `terragrunt plan`) and a specific unit of infrastructure that has been changed (e.g. `/path/to/changed-unit/terragrunt.hcl`) and executes the specified action on the specified unit.
 
 For example, when responding to a `ModuleUpdated` event for `/some/unit/terragrunt.hcl`, the executor might execute a `terragrunt apply` on `/some/unit/terragrunt.hcl`. Or when responding to `AccountsAdded` events on merge, the executor may create a follow-up pull request in the `infrastructure-live-root` repository to include additional IaC code for baselining the newly added accounts.
diff --git a/docs/2.0/docs/pipelines/architecture/security-controls.md b/docs/2.0/docs/pipelines/architecture/security-controls.md
index 6b88e281b8..979e1fa0f5 100644
--- a/docs/2.0/docs/pipelines/architecture/security-controls.md
+++ b/docs/2.0/docs/pipelines/architecture/security-controls.md
@@ -74,4 +74,138 @@ To learn more about how Pipelines authenticates to the cloud, read the [Cloud Au
 
 A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your infrastructure resources (e.g. AWS accounts, VPCs, EC2 instances, etc.).
 
+<<<<<<< HEAD
 Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your infrastructure resources. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control.
+======= +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::0123456789012:oidc-provider/token.actions.githubusercontent.com" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "token.actions.githubusercontent.com:sub": "repo:acme/infrastructure-live-root:ref:*" + } + } + } + ] +} +``` + + + + + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::0123456789012:oidc-provider/gitlab.com" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "gitlab.com:sub": "project_path:acme/projectprefix*:*" + } + } + } + ] +} + + +``` + + + + + +Refer to [Configuring OpenId Connect in Amazon Web Services](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) for additional details. + +### Roles provisioned by DevOps Foundations + +Pipelines automatically provisions specific roles in AWS accounts to support required infrastructure operations. These roles follow the naming pattern `-pipelines-`. + +For example: +- The `root-pipelines-plan` role is used by Pipelines to plan changes in the `infrastructure-live-root` repository. + +These roles are designed to operate in a single repository and include a trust policy that only permits GitHub Actions workflows triggered by that repository to assume the role. Each role is provisioned in pairs: +- `plan` roles, with read-only permissions, are used to execute Terragrunt plans for open pull requests. +- `apply` roles, with read/write permissions, are used to apply or destroy infrastructure changes for merged pull requests or direct pushes to the deploy branch (commonly `main`). + +This separation ensures that controls like [branch protection](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) and [CODEOWNERS files](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) can effectively govern infrastructure changes. + +#### `root-pipelines-plan` + +A read-only plan role for the `infrastructure-live-root` repository. +- This role is one of the first created when setting up DevOps Foundations. It is provisioned manually by the customer during the platform setup process. +- It exists in all accounts and handles tasks necessary for setting up AWS accounts. +- These roles are highly permissive among read-only roles as they manage foundational AWS account setups. + +#### `root-pipelines-apply` + +A read/write apply role for the `infrastructure-live-root` repository. +- Like the plan role, this is one of the initial roles created during setup. +- It is broadly permissive to support foundational account setups and bootstrapping. + +#### `access-control-pipelines-plan` + +A read-only plan role for the `infrastructure-live-access-control` repository. +- These roles are provisioned for new accounts but are not included in core accounts such as `management`, `logs`, `security`, or `shared`. +- They manage IAM roles and policies for vended accounts, facilitating infrastructure access control. + +#### `access-control-pipelines-apply` + +A read/write apply role for the `infrastructure-live-access-control` repository. 
+- Similar to the plan role, these roles are provisioned for vended accounts but excluded from core accounts. +- They have permissions to manage IAM roles and policies for the accounts where they are provisioned. + +#### `delegated-pipelines-plan` + +A read-only plan role for delegated repositories, used by Pipelines Enterprise customers. + +- These roles are pre-configured to have minimal permissions, primarily for managing OpenTofu/Terraform state. +- A pull request will be opened in `infrastructure-live-access-control` during provisioning include documentation for adding additional permissions if necessary. +- Users should ensure that only the necessary _read-only_ permissions are granted for the specific delegated repository. + + +:::note + +These roles have almost no permissions by default. They are pre-configured by default to only have access to OpenTofu/Terraform state, and the pull requests that are opened to provision them include documentation on how to add additional permissions as appropriate. + +It is up to the user provisioning these roles to ensure that this role has only the necessary _read-only_ permissions required to manage infrastructure changes relevant to the delegated repository. + +::: + +#### `delegated-pipelines-apply` + +A read/write apply role for delegated repositories. +- Similar to the plan role, these roles are pre-configured with minimal permissions and are intended for managing OpenTofu/Terraform state. +- A pull request will be opened in `infrastructure-live-access-control` during provisioning include documentation for adding additional permissions if necessary. +- Users must ensure that the role has only the necessary _read/write_ permissions required for the delegated repository. + +:::note +The `delegated-pipelines-plan` and `delegated-pipelines-apply` roles are automatically provisioned for new delegated accounts. Enterprise customers will see pull requests created in the `infrastructure-live-access-control` repository to vend these roles with proper configurations. +::: + +## Trust boundaries + +A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your infrastructure resources (e.g. AWS accounts, VPCs, EC2 instances, etc.). + +Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your infrastructure resources. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control. + +:::tip + +Each AWS IAM role provisioned through setup of [Gruntwork Account Factory](https://docs.gruntwork.io/account-factory/overview) is configured to trust a single repository (and, for apply roles, a single branch). If a role's permissions become overly broad, consider creating a new role with more granular permissions tailored to the specific use case. Use the `infrastructure-live-access-control` repository to define and manage these roles. 
+ +::: +>>>>>>> 9d212600 (fix: Reworked components page into execution flow page) From 2b762cda192ebbd24e4da0daaf2e02be423d32a9 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 25 Sep 2025 16:45:59 -0400 Subject: [PATCH 05/39] docs: Migrating out AWS specific security controls for Pipelines to Account Factory --- .../architecture/security-controls.md | 134 ------------------ 1 file changed, 134 deletions(-) diff --git a/docs/2.0/docs/pipelines/architecture/security-controls.md b/docs/2.0/docs/pipelines/architecture/security-controls.md index 979e1fa0f5..6b88e281b8 100644 --- a/docs/2.0/docs/pipelines/architecture/security-controls.md +++ b/docs/2.0/docs/pipelines/architecture/security-controls.md @@ -74,138 +74,4 @@ To learn more about how Pipelines authenticates to the cloud, read the [Cloud Au A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your infrastructure resources (e.g. AWS accounts, VPCs, EC2 instances, etc.). -<<<<<<< HEAD Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your infrastructure resources. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control. -======= -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::0123456789012:oidc-provider/token.actions.githubusercontent.com" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringLike": { - "token.actions.githubusercontent.com:sub": "repo:acme/infrastructure-live-root:ref:*" - } - } - } - ] -} -``` - - - - - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::0123456789012:oidc-provider/gitlab.com" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringLike": { - "gitlab.com:sub": "project_path:acme/projectprefix*:*" - } - } - } - ] -} - - -``` - - - - - -Refer to [Configuring OpenId Connect in Amazon Web Services](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) for additional details. - -### Roles provisioned by DevOps Foundations - -Pipelines automatically provisions specific roles in AWS accounts to support required infrastructure operations. These roles follow the naming pattern `-pipelines-`. - -For example: -- The `root-pipelines-plan` role is used by Pipelines to plan changes in the `infrastructure-live-root` repository. - -These roles are designed to operate in a single repository and include a trust policy that only permits GitHub Actions workflows triggered by that repository to assume the role. Each role is provisioned in pairs: -- `plan` roles, with read-only permissions, are used to execute Terragrunt plans for open pull requests. -- `apply` roles, with read/write permissions, are used to apply or destroy infrastructure changes for merged pull requests or direct pushes to the deploy branch (commonly `main`). 
- -This separation ensures that controls like [branch protection](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) and [CODEOWNERS files](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) can effectively govern infrastructure changes. - -#### `root-pipelines-plan` - -A read-only plan role for the `infrastructure-live-root` repository. -- This role is one of the first created when setting up DevOps Foundations. It is provisioned manually by the customer during the platform setup process. -- It exists in all accounts and handles tasks necessary for setting up AWS accounts. -- These roles are highly permissive among read-only roles as they manage foundational AWS account setups. - -#### `root-pipelines-apply` - -A read/write apply role for the `infrastructure-live-root` repository. -- Like the plan role, this is one of the initial roles created during setup. -- It is broadly permissive to support foundational account setups and bootstrapping. - -#### `access-control-pipelines-plan` - -A read-only plan role for the `infrastructure-live-access-control` repository. -- These roles are provisioned for new accounts but are not included in core accounts such as `management`, `logs`, `security`, or `shared`. -- They manage IAM roles and policies for vended accounts, facilitating infrastructure access control. - -#### `access-control-pipelines-apply` - -A read/write apply role for the `infrastructure-live-access-control` repository. -- Similar to the plan role, these roles are provisioned for vended accounts but excluded from core accounts. -- They have permissions to manage IAM roles and policies for the accounts where they are provisioned. - -#### `delegated-pipelines-plan` - -A read-only plan role for delegated repositories, used by Pipelines Enterprise customers. - -- These roles are pre-configured to have minimal permissions, primarily for managing OpenTofu/Terraform state. -- A pull request will be opened in `infrastructure-live-access-control` during provisioning include documentation for adding additional permissions if necessary. -- Users should ensure that only the necessary _read-only_ permissions are granted for the specific delegated repository. - - -:::note - -These roles have almost no permissions by default. They are pre-configured by default to only have access to OpenTofu/Terraform state, and the pull requests that are opened to provision them include documentation on how to add additional permissions as appropriate. - -It is up to the user provisioning these roles to ensure that this role has only the necessary _read-only_ permissions required to manage infrastructure changes relevant to the delegated repository. - -::: - -#### `delegated-pipelines-apply` - -A read/write apply role for delegated repositories. -- Similar to the plan role, these roles are pre-configured with minimal permissions and are intended for managing OpenTofu/Terraform state. -- A pull request will be opened in `infrastructure-live-access-control` during provisioning include documentation for adding additional permissions if necessary. -- Users must ensure that the role has only the necessary _read/write_ permissions required for the delegated repository. - -:::note -The `delegated-pipelines-plan` and `delegated-pipelines-apply` roles are automatically provisioned for new delegated accounts. 
Enterprise customers will see pull requests created in the `infrastructure-live-access-control` repository to vend these roles with proper configurations. -::: - -## Trust boundaries - -A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your infrastructure resources (e.g. AWS accounts, VPCs, EC2 instances, etc.). - -Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your infrastructure resources. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control. - -:::tip - -Each AWS IAM role provisioned through setup of [Gruntwork Account Factory](https://docs.gruntwork.io/account-factory/overview) is configured to trust a single repository (and, for apply roles, a single branch). If a role's permissions become overly broad, consider creating a new role with more granular permissions tailored to the specific use case. Use the `infrastructure-live-access-control` repository to define and manage these roles. - -::: ->>>>>>> 9d212600 (fix: Reworked components page into execution flow page) From bb7c722a7d5b8576ef0471b1ca9ec99fc4ba55bf Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 25 Sep 2025 17:09:48 -0400 Subject: [PATCH 06/39] docs: Updating `ci-workflows.md` with call outs for Account Factory stuff --- docs/2.0/docs/accountfactory/architecture/security-controls.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/2.0/docs/accountfactory/architecture/security-controls.md b/docs/2.0/docs/accountfactory/architecture/security-controls.md index d41576a336..cd602b2fb7 100644 --- a/docs/2.0/docs/accountfactory/architecture/security-controls.md +++ b/docs/2.0/docs/accountfactory/architecture/security-controls.md @@ -4,6 +4,8 @@ Gruntwork Account Factory employs a defense-in-depth approach to secure workflow Account Factory relies on Pipelines to drive infrastructure changes via GitOps workflows, so make sure to read the [Pipelines security controls](/2.0/docs/pipelines/architecture/security-controls) for more details on how Pipelines secures workflows. +Account Factory relies on Pipelines to drive infrastructure changes via GitOps workflows, so make sure to read the [Pipelines security controls](/2.0/docs/pipelines/architecture/security-controls) for more details on how Pipelines secures workflows. + ## Least privilege principle Account Factory adheres to the principle of least privilege, configuring the AWS IAM roles vended as part of Account Factory onboarding to grant only the necessary permissions for infrastructure actions relevant for Account Factory to operate correctly, and to only trust the `infrastructure-live-root` repository for role assumption. 
From 08721841c2702702e90ed1c09292ada4812d6a5c Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Mon, 6 Oct 2025 13:42:18 -0400
Subject: [PATCH 07/39] docs: Addressing PR feedback

---
 docs/2.0/docs/accountfactory/architecture/security-controls.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/docs/2.0/docs/accountfactory/architecture/security-controls.md b/docs/2.0/docs/accountfactory/architecture/security-controls.md
index cd602b2fb7..d41576a336 100644
--- a/docs/2.0/docs/accountfactory/architecture/security-controls.md
+++ b/docs/2.0/docs/accountfactory/architecture/security-controls.md
@@ -4,8 +4,6 @@ Gruntwork Account Factory employs a defense-in-depth approach to secure workflow
 
 Account Factory relies on Pipelines to drive infrastructure changes via GitOps workflows, so make sure to read the [Pipelines security controls](/2.0/docs/pipelines/architecture/security-controls) for more details on how Pipelines secures workflows.
 
-Account Factory relies on Pipelines to drive infrastructure changes via GitOps workflows, so make sure to read the [Pipelines security controls](/2.0/docs/pipelines/architecture/security-controls) for more details on how Pipelines secures workflows.
-
 ## Least privilege principle
 
 Account Factory adheres to the principle of least privilege, configuring the AWS IAM roles vended as part of Account Factory onboarding to grant only the necessary permissions for infrastructure actions relevant for Account Factory to operate correctly, and to only trust the `infrastructure-live-root` repository for role assumption.

From 061bbeba25617d4d69d0697851e0ee04328efd4d Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Mon, 6 Oct 2025 16:06:35 -0400
Subject: [PATCH 08/39] fix: Adding abbreviation to dictionary

---
 custom-dictionary.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/custom-dictionary.txt b/custom-dictionary.txt
index 3ccf201a08..0b7ec3f155 100644
--- a/custom-dictionary.txt
+++ b/custom-dictionary.txt
@@ -62,3 +62,4 @@ hcledit
 self-hosting
 infrachanges
 Entra
+GLMU

From 3165aa879d3da0e2d84193e0c15725cd4fa02299 Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Fri, 19 Sep 2025 15:21:56 -0400
Subject: [PATCH 09/39] docs: Nested AWS into `Authenticating to the Cloud`

---
 src/redirects.js | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/redirects.js b/src/redirects.js
index f2b86849a2..b716a4f55d 100644
--- a/src/redirects.js
+++ b/src/redirects.js
@@ -113,6 +113,10 @@ export const redirects = [
     to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
     from: "/foundations/pipelines/aws-authentication"
   },
+  {
+    to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
+    from: "/2.0/docs/pipelines/concepts/cloud-auth"
+  },
   {
     to: "/2.0/docs/pipelines/concepts/overview",
     from: "/foundations/pipelines/"

From 12e2b1bc951e3f96cda37ad840fcde563cd981a0 Mon Sep 17 00:00:00 2001
From: Josh Padnick
Date: Mon, 22 Sep 2025 16:13:58 -0700
Subject: [PATCH 10/39] Fix build issues.

---
 src/redirects.js | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/redirects.js b/src/redirects.js
index b716a4f55d..f2b86849a2 100644
--- a/src/redirects.js
+++ b/src/redirects.js
@@ -113,10 +113,6 @@ export const redirects = [
     to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
     from: "/foundations/pipelines/aws-authentication"
   },
-  {
-    to: "/2.0/docs/pipelines/concepts/cloud-auth/aws",
-    from: "/2.0/docs/pipelines/concepts/cloud-auth"
-  },
   {
     to: "/2.0/docs/pipelines/concepts/overview",
     from: "/foundations/pipelines/"

From a5b4d9d818df84e231e5a5f2c5f7652e5fa3872f Mon Sep 17 00:00:00 2001
From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com>
Date: Thu, 25 Sep 2025 17:36:04 -0400
Subject: [PATCH 11/39] docs: Moving AWS Landing Zone prereq to Account Factory

docs: Adjusting redirects for moving AWS Landing Zone to Account Factory
---
 .../docs/accountfactory/installation/index.md |  9 +--
 .../prerequisites/awslandingzone.md           | 10 ++-
 .../pipelines/installation/authoverview.md    | 16 +++--
 .../pipelines/installation/viagithubapp.md    | 26 ++++----
 .../pipelines/installation/viamachineusers.md | 61 +++++++++++++------
 sidebars/docs.js                              | 24 ++++----
 src/redirects.js                              | 12 ++--
 7 files changed, 93 insertions(+), 65 deletions(-)
 rename docs/2.0/docs/{pipelines/installation => accountfactory}/prerequisites/awslandingzone.md (97%)

diff --git a/docs/2.0/docs/accountfactory/installation/index.md b/docs/2.0/docs/accountfactory/installation/index.md
index 11a240e3b7..b7a81b99f2 100644
--- a/docs/2.0/docs/accountfactory/installation/index.md
+++ b/docs/2.0/docs/accountfactory/installation/index.md
@@ -7,11 +7,11 @@ Account Factory is automatically integrated into [new Pipelines root repositorie
 By default, Account Factory includes the following components:
 
 - 📋 An HTML form for generating workflow inputs: `.github/workflows/account-factory-inputs.html`
- 
+
 - 🏭 A workflow for generating new requests: `.github/workflows/account-factory.yml`
- 
+
 - 🗃️ A root directory for tracking account requests: `_new-account-requests`
- 
+
 - ⚙️ A YAML file for tracking account names and IDs: `accounts.yml`
 
 For detailed instructions on using these components, refer to the [Vending a New AWS Account Guide](/2.0/docs/accountfactory/guides/vend-aws-account).
@@ -19,6 +19,3 @@ For detailed instructions on using these components, refer to the [Vending a New
 ## Configuring account factory
 
 Account Factory is fully operational for vending new accounts without requiring any configuration changes. However, a [comprehensive reference for all configuration options is available here](/2.0/reference/accountfactory/configurations), allowing you to customize values and templates for generating Infrastructure as Code (IaC) for new accounts.
-
-
-
diff --git a/docs/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md b/docs/2.0/docs/accountfactory/prerequisites/awslandingzone.md
similarity index 97%
rename from docs/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md
rename to docs/2.0/docs/accountfactory/prerequisites/awslandingzone.md
index 397d5ba301..073482cd19 100644
--- a/docs/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md
+++ b/docs/2.0/docs/accountfactory/prerequisites/awslandingzone.md
@@ -1,11 +1,10 @@
 import CustomizableValue from '/src/components/CustomizableValue';
 
-
 # Landing Zone
 
 ## Overview
 
-The Landing Zone component establishes an initial best-practice AWS multi-account setup.
+The Landing Zone component establishes an initial best-practice AWS multi-account setup for use with Gruntwork Account Factory. ## Extending AWS Control Tower @@ -242,16 +241,15 @@ Complete the following steps to prepare for Gruntwork Account Factory: 3. Switch to the `Users` tab, select your management user from the list and click **Next** - 4. Select `AWSAdministratorAccess` from the list of Permission Sets, then click **Next** + 4. Select `AWSAdministratorAccess` from the list of Permission Sets, then click **Next** - 5. Click `Submit` to finish assigning access to your user + 5. Click `Submit` to finish assigning access to your user ## Next steps Now that Control Tower is configured, consider these next steps: + - [Set up IAM Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/get-started-choose-identity-source.html) for access control. - [Apply required controls or SCPs](https://docs.aws.amazon.com/controltower/latest/userguide/controls.html). - [Install Gruntwork Pipelines](/2.0/docs/pipelines/installation/viagithubapp). - [Set up Gruntwork Account Factory](/2.0/docs/accountfactory/installation). - - diff --git a/docs/2.0/docs/pipelines/installation/authoverview.md b/docs/2.0/docs/pipelines/installation/authoverview.md index 6ed8a85d8d..74e34b0c7c 100644 --- a/docs/2.0/docs/pipelines/installation/authoverview.md +++ b/docs/2.0/docs/pipelines/installation/authoverview.md @@ -1,12 +1,13 @@ # Authenticating Gruntwork Pipelines Gruntwork Pipelines requires authentication with GitHub/GitLab to perform various functions, including: -* Downloading Gruntwork code, such as the Pipelines binary and Terraform modules, from the `gruntwork-io` GitHub organization. -* Interacting with your repositories, such as: - * Creating pull requests. - * Commenting on pull requests. - * Creating new repositories via Account Factory. - * Updating repository settings, such as enforcing branch protection, via Account Factory. + +- Downloading Gruntwork code, such as the Pipelines binary and Terraform modules, from the `gruntwork-io` GitHub organization. +- Interacting with your repositories, such as: + - Creating pull requests. + - Commenting on pull requests. + - Creating new repositories via Account Factory. + - Updating repository settings, such as enforcing branch protection, via Account Factory. Gruntwork provides two authentication methods: a [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md) and CI Users ([Machine Users](/2.0/docs/pipelines/installation/viamachineusers.md)) with personal access tokens for Pipelines. @@ -15,11 +16,14 @@ Both approaches support the core functionality of Pipelines. However, the GitHub ## Summary of authentication mechanisms for GitHub **Advantages of the GitHub App**: + - Simplified setup process. - Access to enhanced features and functionality. - Improved user experience during regular operations. - Reduced maintenance, as there is no need to install, maintain, or rotate powerful tokens. **Advantages of Machine Users**: + - Compatibility with on-premises GitHub Enterprise installations that cannot interact with third-party servers (e.g., Gruntwork's backend). - Provides a fallback solution to ensure Pipelines continue functioning in the unlikely event of an outage affecting the Gruntwork-hosted backend that powers the GitHub App. +- Allows GitLab customers to download the Pipelines binary from GitLab CI Pipelines. 
diff --git a/docs/2.0/docs/pipelines/installation/viagithubapp.md b/docs/2.0/docs/pipelines/installation/viagithubapp.md index b2dd70375c..ebec5153a2 100644 --- a/docs/2.0/docs/pipelines/installation/viagithubapp.md +++ b/docs/2.0/docs/pipelines/installation/viagithubapp.md @@ -13,6 +13,7 @@ The [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) is a [GitHub At this time Gruntwork does not provide an app for GitLab, this page is only relevant for Gruntwork Pipelines users installing in GitHub. ::: + ## Overview There are three major components to keep in mind when working with the Gruntwork.io GitHub App: @@ -28,6 +29,7 @@ The Gruntwork.io GitHub App is the principal that Gruntwork products will utiliz #### Required Permissions As of 2024/09/10, the Gruntwork.io GitHub App requests the following permissions: + - **Read access to Actions**: Allows the app to read GitHub Actions artifacts. - **Write access to Administration**: Allows the app to create new repositories, and add teams as collaborators to repositories. - **Write access to Contents**: Allows the app to read and write repository contents. @@ -40,13 +42,15 @@ As of 2024/09/10, the Gruntwork.io GitHub App requests the following permissions Gruntwork.io requests all of these permissions because it requires them for different operations. Unfortunately, the way GitHub apps work prevents us from requesting permissions on a more granular basis. Know that the GitHub App Service will scope down its permissions whenever possible to the minimum required for the operation at hand. - The level of granularity available to customers when configuring the GitHub App installation is to either install the app on a per-repository basis or on an entire organization. Our recommendation is as follows: + The level of granularity available to customers when configuring the GitHub App installation is to either install the app on a per-repository basis or on an entire organization. Our recommendation is as follows for Account Factory customers: + + - For non-enterprise customers, allow the app for `infrastructure-live-root` repository and (if in-use) `infrastructure-live-access-control` and `infrastructure-catalog`. - * For non-enterprise customers, allow the app for `infrastructure-live-root` repository and (if in-use) `infrastructure-live-access-control` and `infrastructure-catalog`. - * For enterprise customers, allow the app to have access to the entire organization. + - For enterprise customers, allow the app to have access to the entire organization. -The reasoning for requiring entire-organization access for enterprise customers is that if you are using Account Factory to create delegated repositories then Account Factory will be creating, and then immediately modifying, new repositories in automated flows, which means it needs access to new repos as soon as they are created which is only possible with entire organization permission. + For non-Account Factory customers, we recommend installing the app on a per-repository basis. + The reasoning for requiring entire-organization access for enterprise customers is that if you are using Account Factory to create delegated repositories then Account Factory will be creating, and then immediately modifying, new repositories in automated flows, which means it needs access to new repos as soon as they are created which is only possible with entire organization permission. If you are unsure how to proceed here, reach out to Gruntwork Support for guidance. 
@@ -108,7 +112,7 @@ The GitHub App Service is used by two major clients: 2. **Gruntwork Pipelines** - The main client for the Gruntwork.io App, and where most of the value is derived. Pipelines uses the GitHub App Service to acquire the relevant access for interacting with GitHub resources on behalf of the user. Access control rules are enforced here to ensure that only the level of access required, and explicitly specified in the Gruntwork Developer Portal can be used by Pipelines to interact with GitHub resources on behalf of the user. + The main client for the Gruntwork.io App, and where most of the value is derived. Pipelines uses the GitHub App Service to acquire the relevant access for interacting with GitHub resources on behalf of the user. Access control rules are enforced here to ensure that only the level of access required (and explicitly specified in the Gruntwork Developer Portal) can be used by Pipelines to interact with GitHub resources on behalf of the user. For example, while the Gruntwork.io GitHub App does have permissions to create new repositories, Pipelines will only do so if a workflow originating from a configured `infrastructure-live-root` repository requests it. @@ -118,7 +122,7 @@ The availability of the Gruntwork.io GitHub App is something Gruntwork will ende Any downtime of Gruntwork services will not impact the ability of your team to manage infrastructure using Gruntwork products. -#### App Only Features +### App Only Features The following features of the Gruntwork.io GitHub App will be unavailable during downtime: @@ -126,11 +130,11 @@ The following features of the Gruntwork.io GitHub App will be unavailable during - **Gruntwork Pipelines Comments**: While Pipelines will allow for IaC updates in a degraded state without the availability of the GitHub App, comments are a feature that rely on the availability of the app for the best experience. - **Gruntwork Pipelines Drift Detection**: Drift detection requires the availability of the GitHub App to function correctly. -#### Fallback +### Fallback -In order to ensure that the availability of the Gruntwork.io GitHub App is not something that can impair the ability of users to drive infrastructure updates, the legacy mechanism of authenticating with GitHub using [Machine users](/2.0/docs/pipelines/installation/viamachineusers.md) is still supported. +In order to ensure that the availability of the Gruntwork.io GitHub App is not something that can impair the ability of users to drive infrastructure updates, users can also authenticate with GitHub using [Machine users](/2.0/docs/pipelines/installation/viamachineusers.md). -Configuring the `PIPELINES_READ_TOKEN`, `INFRA_ROOT_WRITE_TOKEN` and `ORG_REPO_ADMIN_TOKEN` where necessary (following the documentation linked above) will result in Pipelines using the legacy mechanism to authenticate with GitHub, rather than the Gruntwork.io GitHub App. +Configuring the `PIPELINES_READ_TOKEN`, `INFRA_ROOT_WRITE_TOKEN` and `ORG_REPO_ADMIN_TOKEN` where necessary (following the documentation linked above) will result in Pipelines using the machine users mechanism to authenticate with GitHub, rather than the Gruntwork.io GitHub App. Using these fallback tokens will ensure that Pipelines can continue to perform operations like: @@ -160,7 +164,7 @@ To install the Gruntwork.io GitHub App in your organization follow these steps. ## Configuration -

Infrastructure Live Root Repositories

+### Infrastructure Live Root Repositories DevOps Foundations treats certain repositories as especially privileged in order to perform critical operations like vending new AWS accounts and creating new repositories. These repositories are called "infrastructure live root repositories" and you can configure them in the [GitHub Account section](https://app.gruntwork.io/account?scroll_to=github-app) for your organization in the Gruntwork developer portal **if you are a designated administrator**. @@ -174,7 +178,7 @@ For more information, see the [relevant architecture documentation](/2.0/docs/pi ## Frequently Asked Questions -#### How do I find my Gruntwork.io GitHub App installation ID? +### How do I find my Gruntwork.io GitHub App installation ID? You can find the installation ID of the Gruntwork.io GitHub App in the URL of the installation page. diff --git a/docs/2.0/docs/pipelines/installation/viamachineusers.md b/docs/2.0/docs/pipelines/installation/viamachineusers.md index 7e28d5de9a..7d0d648c85 100644 --- a/docs/2.0/docs/pipelines/installation/viamachineusers.md +++ b/docs/2.0/docs/pipelines/installation/viamachineusers.md @@ -3,12 +3,14 @@ toc_min_heading_level: 2 toc_max_heading_level: 4 --- + # Setting up Pipelines via GitHub Machine Users + import PersistentCheckbox from '/src/components/PersistentCheckbox'; import Tabs from "@theme/Tabs" import TabItem from "@theme/TabItem" -For GitHub users, of the [two methods](/2.0/docs/pipelines/installation/authoverview.md) for installing Gruntwork Pipelines, we strongly recommend using the [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md). However, if the GitHub App cannot be used or if machine users are required as a [fallback](http://localhost:3000/2.0/docs/pipelines/installation/viagithubapp#fallback), this guide outlines how to set up authentication for Pipelines using access tokens and machine users. +For GitHub users, of the [two methods](/2.0/docs/pipelines/installation/authoverview.md) for installing Gruntwork Pipelines, we strongly recommend using the [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md). However, if the GitHub App cannot be used or if machine users are required as a [fallback](/2.0/docs/pipelines/installation/viagithubapp#fallback), this guide outlines how to set up authentication for Pipelines using access tokens and machine users. For GitHub or GitLab users, when using tokens, Gruntwork recommends setting up CI users specifically for Gruntwork Pipelines, separate from human users in your organization. This separation ensures workflows are not disrupted if an employee leaves the company and allows for more precise permission management. Additionally, using CI users allow you to apply granular permissions that may normally be too restrictive for a normal employee to do their daily work. @@ -19,6 +21,7 @@ This guide will take approximately 30 minutes to complete. ::: ## Background + ### Guidance on storing secrets During this process, you will generate and securely store several access tokens. Use a temporary but secure location for these sensitive values between generating them and storing them in GitHub or GitLab. Follow your organization's security best practices and avoid insecure methods (e.g., Slack or sticky notes) during this exercise. @@ -87,16 +90,17 @@ GitLab uses access tokens for authentication. There are several types of access For Pipelines, we recommend using Project or Group Access Tokens. -Note that Project and Group access tokens are only available in certain GitLab licenses. 
Specifically: +Note that Project and Group access tokens are only available in certain GitLab licenses. Specifically: [Project Access Tokens](https://docs.gitlab.com/user/project/settings/project_access_tokens/#token-availability) -* On GitLab SaaS: If you have the Premium or Ultimate license tier, only one project access token is available with a [trial license](https://about.gitlab.com/free-trial/). -* On GitLab Self-Managed instances: With any license tier. If you have the Free tier, consider [restricting the creation of project access tokens](https://docs.gitlab.com/user/project/settings/project_access_tokens/#restrict-the-creation-of-project-access-tokens) to lower potential abuse. + +- On GitLab SaaS: If you have the Premium or Ultimate license tier, only one project access token is available with a [trial license](https://about.gitlab.com/free-trial/). +- On GitLab Self-Managed instances: With any license tier. If you have the Free tier, consider [restricting the creation of project access tokens](https://docs.gitlab.com/user/project/settings/project_access_tokens/#restrict-the-creation-of-project-access-tokens) to lower potential abuse. [Group Access Tokens](https://docs.gitlab.com/user/group/settings/group_access_tokens/) -* On GitLab.com, you can use group access tokens if you have the Premium or Ultimate license tier. Group access tokens are not available with a [trial license](https://about.gitlab.com/free-trial/). -* On GitLab Dedicated and self-managed instances, you can use group access tokens with any license tier. +- On GitLab.com, you can use group access tokens if you have the Premium or Ultimate license tier. Group access tokens are not available with a [trial license](https://about.gitlab.com/free-trial/). +- On GitLab Dedicated and self-managed instances, you can use group access tokens with any license tier. @@ -116,7 +120,7 @@ Both the `ci-user` and the `ci-read-only-user` must: 1. Be members of your GitHub Organization. -2. Be added to your team in **Gruntwork**’s GitHub Organization (See [instructions on inviting a user to your team](https://docs.gruntwork.io/developer-portal/invite-team#inviting-team-members) and [linking the user’s GitHub ID to Gruntwork](https://docs.gruntwork.io/developer-portal/link-github-id)). +2. Be added to your team in **Gruntwork**’s GitHub Organization (See [instructions on inviting a user to your team](https://docs.gruntwork.io/developer-portal/invite-team#inviting-team-members) and [linking the user’s GitHub ID to Gruntwork](https://docs.gruntwork.io/developer-portal/link-github-id)). :::tip We recommend creating two machine users for better access control, but you may adjust this setup to fit your organization’s needs. Ensure permissions are appropriate for their roles, and note that additional GitHub licenses may be required if at capacity. @@ -141,6 +145,7 @@ Ensure the `ci-user` has write access to your: - `infrastructure-live-access-control` repository **Checklist:** + **Create access tokens for the `ci-user`** @@ -148,13 +153,13 @@ Ensure the `ci-user` has write access to your: Generate the required tokens for the ci-user in their GitHub account. **Checklist:** + - #### INFRA_ROOT_WRITE_TOKEN -This [fine-grained](#fine-grained) Personal Access Token allows GitHub Actions to clone `infrastructure-live-root`, open pull requests, and update comments. +This [fine-grained](#fine-grained-tokens) Personal Access Token allows GitHub Actions to clone `infrastructure-live-root`, open pull requests, and update comments. 
This token must have the following permissions to the `INFRA_ROOT_WRITE_TOKEN` for the `infrastructure-live-root` repository: @@ -175,18 +180,23 @@ Below is a detailed breakdown of the permissions needed for the `INFRA_ROOT_WRIT If you are not an Enterprise customer or prefer Pipelines not to execute certain behaviors, you can opt not to grant the related permissions. ##### Content read & write access + Needed for cloning `infrastructure-live-root` and pushing automated changes. Without this permission, the pull request opened by the GitHub Actions workflow will not trigger automation during account vending. ##### Issues read & write access + Allows Pipelines to open issues that alert teams when manual action is required. ##### Metadata read access + Grants visibility into repository metadata. ##### Pull requests read & write access + Allows Pipelines to create pull requests to introduce infrastructure changes. ##### Workflows read & write access + Required to update workflows when provisioning new repositories. @@ -215,27 +225,31 @@ The following is a breakdown of the permissions needed for the `ORG_REPO_ADMIN_T If you are not an Enterprise customer or prefer Pipelines not to carry out certain actions, you can choose to withhold the related permissions. ##### Administration read & write access + Allows the creation of new repositories for delegated infrastructure management. ##### Content read & write access + Used for bootstrapping repositories and populating them with necessary content. ##### Metadata read access + Grants repository-level insights needed for automation. ##### Pull requests read & write access - This is required to open pull requests. When vending delegated repositories for Enterprise customers, Pipelines will open pull requests to automate the process of introducing new Infrastructure as Code changes to drive infrastructure updates. + +This is required to open pull requests. When vending delegated repositories for Enterprise customers, Pipelines will open pull requests to automate the process of introducing new Infrastructure as Code changes to drive infrastructure updates. ##### Workflows read & write access - This is required to update GitHub Action workflow files. When vending delegated repositories for Enterprise customers, Pipelines will create new repositories, including content in the `.github/workflows` directory. Without this permission, Pipelines would not be able to provision repositories with this content. + +This is required to update GitHub Action workflow files. When vending delegated repositories for Enterprise customers, Pipelines will create new repositories, including content in the `.github/workflows` directory. Without this permission, Pipelines would not be able to provision repositories with this content. ##### Members read & write access - Required to update GitHub organization team members. When vending delegated repositories for Enterprise customers, Pipelines will add team members to a team that has access to a delegated repository. Without this permission, Pipelines would not be able to provision repositories that are accessible to the correct team members. +Required to update GitHub organization team members. When vending delegated repositories for Enterprise customers, Pipelines will add team members to a team that has access to a delegated repository. Without this permission, Pipelines would not be able to provision repositories that are accessible to the correct team members. 
- :::tip If you are not an Enterprise customer, you should delete it after DevOps Foundations setup. ::: @@ -244,13 +258,14 @@ If you are not an Enterprise customer, you should delete it after DevOps Foundat The `ci-read-only-user` is configured to download private software within GitHub Actions workflows. This user is responsible for accessing Gruntwork IaC Library modules, your infrastructure-modules repository, other private custom module repositories, and the Pipelines CLI. -This user should use a single classic Personal Access Token (PAT)(https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#personal-access-tokens-classic) with read-only permissions. Since classic PATs offer coarse grained access controls, it’s recommended to assign this user to a GitHub team with READ access limited to the `infrastructure-live-root` repository and any relevant module repositories within your GitHub Organization. Adding this user to the Gruntwork Developer Portal will automatically grant access to the Gruntwork IaC Library. +This user should use a single classic [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#personal-access-tokens-classic) with read-only permissions. Since classic PATs offer coarse grained access controls, it’s recommended to assign this user to a GitHub team with READ access limited to the `infrastructure-live-root` repository and any relevant module repositories within your GitHub Organization. Adding this user to the Gruntwork Developer Portal will automatically grant access to the Gruntwork IaC Library. **Invite ci-read-only-user to your repository** Invite `ci-user-read-only` to your `infrastructure-live-root` repository with read access. **Checklist:** + **Create a token for ci-read-only-user** @@ -260,8 +275,6 @@ Generate the following token for the `ci-read-only-user`: **Checklist:** - - #### PIPELINES_READ_TOKEN This [Classic Personal Access Token](#classic-tokens) manages access to private software during GitHub Action runs. @@ -275,6 +288,7 @@ This token must have `repo` scopes. Gruntwork recommends setting expiration to 9 Make sure both machine users are added to your team in Gruntwork’s GitHub Organization. Refer to the [instructions for inviting a user to your team](https://docs.gruntwork.io/developer-portal/invite-team#inviting-team-members) and [linking the user’s GitHub ID to Gruntwork](https://docs.gruntwork.io/developer-portal/link-github-id) for guidance. **Checklist:** + ## Configure secrets for GitHub Actions @@ -287,11 +301,14 @@ Since this guide uses secrets scoped to specific repositories, the token permiss + **Checklist:** +
+ 1. Navigate to your top-level GitHub Organization and select the **Settings** tab. 2. From the navigation bar on the left side, choose **Secrets and variables**, then select **Actions**. @@ -345,13 +362,16 @@ For more details on creating and using GitHub Actions Organization secrets, refe
+ **Checklist:** +
+ Gruntwork Pipelines retrieves these secrets from GitHub Actions secrets configured in the repository. For instructions on creating repository Actions secrets, refer to [creating secrets for a repository](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository). ### `infrastructure-live-root` @@ -378,8 +398,8 @@ If you are **not an Enterprise customer**, you should also do the following: - Delete the `ORG_REPO_ADMIN_TOKEN` Personal Access Token from the `ci-user`’s GitHub account. - Remove the `ORG_REPO_ADMIN_TOKEN` Repository secret from the `infrastructure-live-root` repository. -::: +::: :::info For more information on creating and using GitHub Actions Repository secrets, refer to the [GitHub Documentation](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository). @@ -391,13 +411,12 @@ For more information on creating and using GitHub Actions Repository secrets, re
- -For GitLab, Gruntwork Pipelines two CI variables. The first, the `PIPELINES_GITLAB_TOKEN` requires the `Developer`, `Maintainer` or `Owner` role and the scopes listed below. This token will be used to authenticate API calls and access repositories within your GitLab group. The second, the `PIPELINES_GITLAB_READ_TOKEN` will be used to access your own code within GitLab. If not set, Pipelines will default to the `CI_JOB_TOKEN` when accessing internal GitLab hosted code. - +For GitLab, Gruntwork Pipelines two CI variables. The first, the `PIPELINES_GITLAB_TOKEN` requires the `Developer`, `Maintainer` or `Owner` role and the scopes listed below. This token will be used to authenticate API calls and access repositories within your GitLab group. The second, the `PIPELINES_GITLAB_READ_TOKEN` will be used to access your own code within GitLab. If not set, Pipelines will default to the `CI_JOB_TOKEN` when accessing internal GitLab hosted code. ### Creating the Access Token Gruntwork recommends [creating](https://docs.gitlab.com/user/project/settings/project_access_tokens/#create-a-project-access-token) two Project or Group Access Tokens as best practice: + | Token Name | Required Scopes | Required Role | Purpose | | ------------------------------- | -------------------------------------------- | ------------------------------- | ---------------------------------------------------------------------------- | | **PIPELINES_GITLAB_TOKEN** | `api` (and `ai_features` if using GitLab AI) | Developer, Maintainer, or Owner | Making API calls (e.g., creating comments on merge requests) | @@ -417,6 +436,7 @@ Set an expiration date according to your organization's security policies. We re ::: **Checklist:** + @@ -434,6 +454,7 @@ Add the `PIPELINES_GITLAB_TOKEN` and `PIPELINES_GITLAB_READ_TOKEN` as CI/CD vari 8. 
Set the value as the Personal Access Token generated in the [Creating the Access Token](#creating-the-access-token) section **Checklist:** + diff --git a/sidebars/docs.js b/sidebars/docs.js index 0de3c67e1f..216c291669 100644 --- a/sidebars/docs.js +++ b/sidebars/docs.js @@ -224,18 +224,6 @@ const sidebar = [ type: "doc", id: "2.0/docs/pipelines/installation/scm-comparison", }, - { - label: "Prerequisites", - type: "category", - collapsed: false, - items: [ - { - label: "AWS Landing Zone", - type: "doc", - id: "2.0/docs/pipelines/installation/prerequisites/awslandingzone", - }, - ], - }, { type: "category", label: "Enable Auth for Pipelines", @@ -487,6 +475,18 @@ const sidebar = [ }, ], }, + { + label: "Prerequisites", + type: "category", + collapsed: false, + items: [ + { + label: "AWS Landing Zone", + type: "doc", + id: "2.0/docs/accountfactory/prerequisites/awslandingzone", + }, + ], + }, { label: "Setup & Installation", type: "doc", diff --git a/src/redirects.js b/src/redirects.js index f2b86849a2..e6f6441866 100644 --- a/src/redirects.js +++ b/src/redirects.js @@ -154,15 +154,15 @@ export const redirects = [ from: "/foundations/running-apps" }, { - to: "/2.0/docs/pipelines/installation/prerequisites/awslandingzone#prerequisites", + to: "/2.0/docs/accountfactory/prerequisites/awslandingzone#prerequisites", from: "/foundations/landing-zone/prerequisites" }, { - to: "/2.0/docs/pipelines/installation/prerequisites/awslandingzone", + to: "/2.0/docs/accountfactory/prerequisites/awslandingzone", from: "/foundations/landing-zone/index" }, { - to: "/2.0/docs/pipelines/installation/prerequisites/awslandingzone", + to: "/2.0/docs/accountfactory/prerequisites/awslandingzone", from: "/foundations/landing-zone" }, { @@ -170,7 +170,7 @@ export const redirects = [ from: "/foundations/landing-zone/add-aws-account" }, { - to: "/2.0/docs/pipelines/installation/prerequisites/awslandingzone#configure-control-tower", + to: "/2.0/docs/accountfactory/prerequisites/awslandingzone#configure-control-tower", from: "/foundations/landing-zone/enable-control-tower" }, { @@ -368,5 +368,9 @@ export const redirects = [ { from: '/2.0/docs/pipelines/architecture/github-workflows', to: '/2.0/docs/pipelines/architecture/ci-workflows' + }, + { + from: '/2.0/docs/pipelines/installation/prerequisites/awslandingzone', + to: '/2.0/docs/accountfactory/prerequisites/awslandingzone' } ] From 9209ecbc193a150299f914d003abd1f7e57555ed Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 25 Sep 2025 19:10:31 -0400 Subject: [PATCH 12/39] docs: Restructured initial setup to avoid assuming AWS docs: Splitting up different cloud providers wip: Progress on stacks --- .../installation/addingnewrepo.md | 51 +++ .../docs/accountfactory/installation/index.md | 2 +- .../pipelines/installation/addingnewrepo.md | 426 ++++++++++++++++-- sidebars/docs.js | 15 +- 4 files changed, 465 insertions(+), 29 deletions(-) create mode 100644 docs/2.0/docs/accountfactory/installation/addingnewrepo.md diff --git a/docs/2.0/docs/accountfactory/installation/addingnewrepo.md b/docs/2.0/docs/accountfactory/installation/addingnewrepo.md new file mode 100644 index 0000000000..9b4414370b --- /dev/null +++ b/docs/2.0/docs/accountfactory/installation/addingnewrepo.md @@ -0,0 +1,51 @@ +# Adding Account Factory to a new repository + +To configure Gruntwork Account Factory in a new GitHub repository, the following steps are required (and will be explained in detail below): + +1. 
Create your `infrastructure-live-root` repository using Gruntwork's GitHub template. +2. Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live-root` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. +3. Update the Bootstrap Workflow to configure your AWS settings. +4. Execute the Bootstrap Workflow in your `infrastructure-live-root` repository to generate pull requests and additional repositories. + +## Creating the infrastructure-live-root repository + +Gruntwork provides a pre-configured git repository template that incorporates best practices while allowing for customization. + +[infrastructure-live-root-template](https://github.com/gruntwork-io/infrastructure-live-root-template) + +This template generates an `infrastructure-live-root` repository with a bootstrap workflow designed to scaffold a best-practices Terragrunt configuration. It includes patterns for module defaults, global variables, and account baselines. Additionally, it integrates Gruntwork Pipelines, which can be removed if not required. + +The workflow can optionally scaffold the `infrastructure-live-access-control` and `infrastructure-catalog` repositories. + +Navigate to the template repository and select **Use this template** -> **Create a new Repository**. Choose your organization as the owner, add a description if desired, set the repository to **private**, and click **Create repository**. + +## Configuring Gruntwork app settings + +Use the Gruntwork.io GitHub App to [add the repository as an Infra Root repository](/2.0/docs/pipelines/installation/viagithubapp#configuration). + +If using the [machine user model](/2.0/docs/pipelines/installation/viamachineusers.md), ensure the `INFRA_ROOT_WRITE_TOKEN` (and `ORG_REPO_ADMIN_TOKEN` for enterprise customers) is added to the repository as a secret or configured as an organization secret. + +## Updating the Bootstrap Workflow + +Return to your `infrastructure-live-root` repository and follow the `README` instructions to update the bootstrap workflow for IaC Foundations. Provide details about your AWS organization, accounts, and default values for new account provisioning. + +## Running the workflow + +Follow the instructions in your `infrastructure-live-root` repository to execute the Bootstrap Workflow. Gruntwork support is available to address any questions that arise. During the workflow execution, you can choose to create the `infrastructure-live-access-control` and `infrastructure-catalog` repositories. These repositories will be created in your GitHub organization using values defined in the workflow configuration. + +### Infrastructure live access control + +This repository is primarily for Enterprise customers but is recommended for all users. When running the Bootstrap Workflow in your `infrastructure-live-root` repository, select the option to "Bootstrap the infrastructure-access-control repository." + +### Infrastructure catalog + +The Bootstrap Workflow also creates an empty `infrastructure-catalog` repository. This repository is used to store Terraform/OpenTofu modules authored by your organization for internal use. During the Bootstrap Workflow execution in your `infrastructure-live-root` repository, select the option to "Bootstrap the infrastructure-catalog repository." + +## Completing instructions in Bootstrap Pull Requests + +Each of your repositories will contain a Bootstrap Pull Request. 
Follow the instructions in these Pull Requests to finalize the setup of your IaC repositories. + +:::info + +The bootstrapping pull requests include pre-configured files, such as a `.mise.toml` file that specifies versions of OpenTofu and Terragrunt. Ensure you review and update these configurations to align with your organization's requirements. +::: diff --git a/docs/2.0/docs/accountfactory/installation/index.md b/docs/2.0/docs/accountfactory/installation/index.md index b7a81b99f2..2d71cba52c 100644 --- a/docs/2.0/docs/accountfactory/installation/index.md +++ b/docs/2.0/docs/accountfactory/installation/index.md @@ -2,7 +2,7 @@ ## Overview -Account Factory is automatically integrated into [new Pipelines root repositories](/2.0/docs/pipelines/installation/addingnewrepo) during the bootstrapping process. +Account Factory is automatically integrated into [new Pipelines root repositories](/2.0/docs/accountfactory/installation/addingnewrepo) during the bootstrapping process. By default, Account Factory includes the following components: diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.md b/docs/2.0/docs/pipelines/installation/addingnewrepo.md index 9001969540..07f903ed1d 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.md +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.md @@ -1,51 +1,425 @@ # Initial Setup -To configure Gruntwork Pipelines in a new GitHub repository, complete the following steps: +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; -1. Create your `infrastructure-live-root` repository using Gruntwork's GitHub template. -2. Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live-root` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. -3. Update the Bootstrap Workflow to configure your AWS settings. -4. Execute the Bootstrap Workflow in your `infrastructure-live-root` repository to generate pull requests and additional repositories. +To configure Gruntwork Pipelines in a new GitHub repository, complete the following steps (which are explained in detail below): -## Creating the infrastructure-live-root repository +1. Create an `infrastructure-live` repository. +2. Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. +3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. +4. Create `.github/workflows/pipelines.yml` to tell your GitHub Actions workflow how to run your pipelines. +5. Commit and push your changes to your repository. -Gruntwork provides a pre-configured git repository template that incorporates best practices while allowing for customization. +## Creating the infrastructure-live repository -[infrastructure-live-root-template](https://github.com/gruntwork-io/infrastructure-live-root-template) +Creating an `infrastructure-live` repository is fairly straightforward. First, create a new repository using the official GitHub documentation for [creating repositories](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-new-repository). Name the repository something like `infrastructure-live` and make it private (or internal). -This template generates an `infrastructure-live-root` repository with a bootstrap workflow designed to scaffold a best-practices Terragrunt configuration. 
It includes patterns for module defaults, global variables, and account baselines. Additionally, it integrates Gruntwork Pipelines, which can be removed if not required. +Clone the repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). -The workflow can optionally scaffold the `infrastructure-live-access-control` and `infrastructure-catalog` repositories. +:::tip -Navigate to the template repository and select **Use this template** -> **Create a new Repository**. Choose your organization as the owner, add a description if desired, set the repository to **private**, and click **Create repository**. +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). -## Configuring Gruntwork app settings +::: + +For example: + +```bash +git clone git@github.com:acme/infrastructure-live.git +``` + +Once the repository is cloned locally, you'll want to create a `.mise.toml` file in the root of the repository to tell Pipelines what versions of Terragrunt and OpenTofu to use. + +For example: + +```toml title=".mise.toml" +[tools] +terragrunt = "0.88.0" +opentofu = "1.10.6" +``` + +:::tip + +Follow the official [mise installation guide](https://mise.jdx.dev/getting-started.html) to install it locally. + +You can get `mise` to lookup the versions available for a given tool by using the `ls-remote` command. + +```bash +mise ls-remote terragrunt +mise ls-remote opentofu +``` + +You can also use the `install` command to install them: + +```bash +mise install +``` + +::: + +## Configuring SCM Access + +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). + +There are two ways to configure SCM access for Pipelines: + +1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitLab users, and for GitHub users who cannot use the GitHub App). + +## Creating Cloud Resources for Pipelines + +To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. + +:::note + +If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. + +::: + +This guide will assume a blank slate, so you can start by creating a new Git repository to track the infrastructure that you're provisioning here. + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: -Use the Gruntwork.io GitHub App to [add the repository as an Infra Root repository](/2.0/docs/pipelines/installation/viagithubapp#configuration). + + -If using the [machine user model](/2.0/docs/pipelines/installation/viamachineusers.md), ensure the `INFRA_ROOT_WRITE_TOKEN` (and `ORG_REPO_ADMIN_TOKEN` for enterprise customers) is added to the repository as a secret or configured as an organization secret. 
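If you follow the machine-user route, the token ultimately needs to be stored as an Actions secret on the repository (or at the organization level). Below is a minimal sketch of doing that with the GitHub CLI; it assumes `gh` is installed and authenticated, and the secret and repository names are placeholders, so substitute whatever names your machine-user setup calls for.

```bash
# Store a machine-user token as a repository secret (placeholder names).
gh secret set PIPELINES_MACHINE_USER_TOKEN \
  --repo acme/infrastructure-live \
  --body "$MACHINE_USER_TOKEN"

# Or store it once as an organization-level secret instead:
gh secret set PIPELINES_MACHINE_USER_TOKEN \
  --org acme \
  --visibility all \
  --body "$MACHINE_USER_TOKEN"
```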
+The resources that you need provisioned in AWS to start managing resources with Pipelines are: -## Updating the Bootstrap Workflow +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands -Return to your `infrastructure-live-root` repository and follow the `README` instructions to update the bootstrap workflow for IaC Foundations. Provide details about your AWS organization, accounts, and default values for new account provisioning. +For every account you want Pipelines to manage infrastructure in. -## Running the workflow +:::tip -Follow the instructions in your `infrastructure-live-root` repository to execute the Bootstrap Workflow. Gruntwork support is available to address any questions that arise. During the workflow execution, you can choose to create the `infrastructure-live-access-control` and `infrastructure-catalog` repositories. These repositories will be created in your GitHub organization using values defined in the workflow configuration. +What follows is a guide for creating the basic, minimal set of resources required to get started. If you have access to Gruntwork's [Infrastructure Library](/2.0/docs/library/concepts/overview), you can use off the following off-the-shelf modules to do the work for you: -### Infrastructure live access control +- [OIDC Provider for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-openid-connect-provider/) +- [IAM Role for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-iam-role/) -This repository is primarily for Enterprise customers but is recommended for all users. When running the Bootstrap Workflow in your `infrastructure-live-root` repository, select the option to "Bootstrap the infrastructure-access-control repository." +::: + +To get started, you can create the modules that you are going to provision for each of these in a `catalog/modules` directory. + +```bash +mkdir -p catalog/modules/{github-actions-oidc-provider, github-actions-iam-role} +``` + +You'll also want to create the scaffolding for the Terragrunt units you want to provision. -### Infrastructure catalog +```bash +mkdir -p live/acme/_global/{github-actions-oidc-provider, github-actions-plan-role, github-actions-apply-role} +``` -The Bootstrap Workflow also creates an empty `infrastructure-catalog` repository. This repository is used to store Terraform/OpenTofu modules authored by your organization for internal use. During the Bootstrap Workflow execution in your `infrastructure-live-root` repository, select the option to "Bootstrap the infrastructure-catalog repository." + -## Completing instructions in Bootstrap Pull Requests + + -Each of your repositories will contain a Bootstrap Pull Request. Follow the instructions in these Pull Requests to finalize the setup of your IaC repositories. +The resources that you need provisioned in Azure to start managing resources with Pipelines are: -:::info +1. An Azure Resource Group for OpenTofu state resources + 1. An Azure Storage Account in that resource group for OpenTofu state storage + 1. An Azure Storage Container in that storage account for OpenTofu state storage +2. An Entra ID Application to use for plans + 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch + 2. A Service Principal for the application to be used in role assignments + 1. 
A role assignment for the service principal to access the Azure subscription + 2. A role assignment for the service principal to access the Azure Storage Account +3. An Entra ID Application to use for applies + 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + +:::note + +This may seem like a lot of work, but it's a one-time step to onboard a new subscription for management with Pipelines. + +You'll receive all the IaC required to create this infrastructure soon, and the majority of the work will be to create a reusable catalog that makes it easy to bootstrap additional subscriptions in the future. -The bootstrapping pull requests include pre-configured files, such as a `mise.toml` file that specifies versions of OpenTofu and Terragrunt. Ensure you review and update these configurations to align with your organization's requirements. ::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Set up the catalog of IaC configurations for bootstrapping a new subscription +2. Provision these bootstrap resources using Terragrunt +3. Pull the bootstrap resources into state, using the storage account we just provisioned + +### Setting up the catalog + +To start setting up the catalog, create the following files in a `catalog` directory you create in your `infrastructure-live` repository: + +```bash +mkdir -p catalog/{modules, units, stacks} +``` + +These subdirectories within the `catalog` directory will be used to store the reusable IaC configurations for bootstrapping each new subscription you want managed by Pipelines. + +:::tip + +We typically recommend using a separate repository for your Terragrunt infrastructure catalog, but we're using a single repository for simplicity in this guide. + +You're free to do that if you would prefer it. + +::: + +Each of the following files can be copied into the `catalog` directory, with the filename listed at the top of the code block. + +```hcl title="catalog/modules/entra-id-application/versions.tf" +terraform { + required_version = ">= 1.0.0" + required_providers { + azuread = { + source = "hashicorp/azuread" + version = "~> 3.6.0" + } + } +} +``` + +```hcl title="catalog/modules/entra-id-application/variables.tf" +variable "display_name" { + description = "The display name for the Entra ID application." + type = string +} + +variable "description" { + description = "The description for the Entra ID application." + type = string +} +``` + +```hcl title="catalog/modules/entra-id-application/main.tf" +resource "azuread_application" "app" { + display_name = var.display_name + description = var.description +} +``` + +```hcl title="catalog/modules/entra-id-application/outputs.tf" +output "id" { + description = "The ID of the Entra ID application." + value = azuread_application.app.id +} + +output "client_id" { + description = "The client ID of the Entra ID application." + value = azuread_application.app.client_id +} + +output "display_name" { + description = "The display name of the Entra ID application." 
+ value = azuread_application.app.display_name +} +``` + +### Provisioning the resources + +### Pulling the resources into state + + + + +## Creating `.gruntwork` HCL configurations + +Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). + +For example: + +```hcl title=".gruntwork/repository.hcl" +repository { + deploy_branch_name = "main" +} +``` + + + + +```hcl title=".gruntwork/environment.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + aws_oidc { + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" + } + } +} + +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + aws_oidc { + account_id = "987654321098" + plan_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-apply" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. + +::: + +:::tip + +Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. + +::: + + + + +```hcl title=".gruntwork/environment.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "11111111-1111-1111-1111-111111111111" + + plan_client_id = "33333333-3333-3333-3333-333333333333" + apply_client_id = "44444444-4444-4444-4444-444444444444" + } + } +} + +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "22222222-2222-2222-2222-222222222222" + + plan_client_id = "55555555-5555-5555-5555-555555555555" + apply_client_id = "66666666-6666-6666-6666-666666666666" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. + +::: + + + + +```hcl title=".gruntwork/environment.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-dev.sh" + } + } +} + +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-prod.sh" + } + } +} +``` + +:::tip + +Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. 
+ +::: + + + + +## Creating `.github/workflows/pipelines.yml` + +Create a `.github/workflows/pipelines.yml` file in the root of your `infrastructure-live` repository with the following content: + +```yaml title=".github/workflows/pipelines.yml" +name: Pipelines +run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}" +on: + push: + branches: + - main + paths-ignore: + - ".github/**" + pull_request: + types: + - opened + - synchronize + - reopened + paths-ignore: + - ".github/**" + +# Permissions to assume roles and create pull requests +permissions: + id-token: write + contents: write + pull-requests: write + +jobs: + GruntworkPipelines: + uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@main +``` + +:::tip + +You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwork-io/pipelines-workflows/blob/main/.github/workflows/pipelines.yml) to learn how this GitHub Actions workflow calls the Pipelines CLI to run your pipelines. + +::: + +## Commit and push your changes + +Commit and push your changes to your repository. + +:::note + +You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow. + +::: + +```bash +git add . +git commit -m "Add Pipelines GitHub Actions workflow [skip ci]" +git push +``` + +🚀 You've successfully added Gruntwork Pipelines to your new repository! + +## Next steps + +You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. diff --git a/sidebars/docs.js b/sidebars/docs.js index 216c291669..dc063efbfa 100644 --- a/sidebars/docs.js +++ b/sidebars/docs.js @@ -489,8 +489,19 @@ const sidebar = [ }, { label: "Setup & Installation", - type: "doc", - id: "2.0/docs/accountfactory/installation/index", + type: "category", + collapsed: true, + link: { + type: "doc", + id: "2.0/docs/accountfactory/installation/index", + }, + items: [ + { + label: "Adding Account Factory to a new repository", + type: "doc", + id: "2.0/docs/accountfactory/installation/addingnewrepo", + }, + ], }, { label: "Guides", From 8c5a4998cef895d46000925ca0c25bcd8571e19b Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Wed, 1 Oct 2025 11:57:16 -0400 Subject: [PATCH 13/39] feat: Set up full Azure installation guide --- .../pipelines/installation/addingnewrepo.md | 425 ------------ .../pipelines/installation/addingnewrepo.mdx | 607 ++++++++++++++++++ 2 files changed, 607 insertions(+), 425 deletions(-) delete mode 100644 docs/2.0/docs/pipelines/installation/addingnewrepo.md create mode 100644 docs/2.0/docs/pipelines/installation/addingnewrepo.mdx diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.md b/docs/2.0/docs/pipelines/installation/addingnewrepo.md deleted file mode 100644 index 07f903ed1d..0000000000 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.md +++ /dev/null @@ -1,425 +0,0 @@ -# Initial Setup - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -To configure Gruntwork Pipelines in a new GitHub repository, complete the following steps (which are explained in detail below): - -1. Create an `infrastructure-live` repository. -2. 
Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. -3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. -4. Create `.github/workflows/pipelines.yml` to tell your GitHub Actions workflow how to run your pipelines. -5. Commit and push your changes to your repository. - -## Creating the infrastructure-live repository - -Creating an `infrastructure-live` repository is fairly straightforward. First, create a new repository using the official GitHub documentation for [creating repositories](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-new-repository). Name the repository something like `infrastructure-live` and make it private (or internal). - -Clone the repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). - -:::tip - -If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). - -::: - -For example: - -```bash -git clone git@github.com:acme/infrastructure-live.git -``` - -Once the repository is cloned locally, you'll want to create a `.mise.toml` file in the root of the repository to tell Pipelines what versions of Terragrunt and OpenTofu to use. - -For example: - -```toml title=".mise.toml" -[tools] -terragrunt = "0.88.0" -opentofu = "1.10.6" -``` - -:::tip - -Follow the official [mise installation guide](https://mise.jdx.dev/getting-started.html) to install it locally. - -You can get `mise` to lookup the versions available for a given tool by using the `ls-remote` command. - -```bash -mise ls-remote terragrunt -mise ls-remote opentofu -``` - -You can also use the `install` command to install them: - -```bash -mise install -``` - -::: - -## Configuring SCM Access - -Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). - -There are two ways to configure SCM access for Pipelines: - -1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). -2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitLab users, and for GitHub users who cannot use the GitHub App). - -## Creating Cloud Resources for Pipelines - -To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. - -:::note - -If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. - -::: - -This guide will assume a blank slate, so you can start by creating a new Git repository to track the infrastructure that you're provisioning here. - -:::tip - -If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). - -::: - - - - -The resources that you need provisioned in AWS to start managing resources with Pipelines are: - -1. An OpenID Connect (OIDC) provider -2. 
An IAM role for Pipelines to assume when running Terragrunt plan commands -3. An IAM role for Pipelines to assume when running Terragrunt apply commands - -For every account you want Pipelines to manage infrastructure in. - -:::tip - -What follows is a guide for creating the basic, minimal set of resources required to get started. If you have access to Gruntwork's [Infrastructure Library](/2.0/docs/library/concepts/overview), you can use off the following off-the-shelf modules to do the work for you: - -- [OIDC Provider for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-openid-connect-provider/) -- [IAM Role for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-iam-role/) - -::: - -To get started, you can create the modules that you are going to provision for each of these in a `catalog/modules` directory. - -```bash -mkdir -p catalog/modules/{github-actions-oidc-provider, github-actions-iam-role} -``` - -You'll also want to create the scaffolding for the Terragrunt units you want to provision. - -```bash -mkdir -p live/acme/_global/{github-actions-oidc-provider, github-actions-plan-role, github-actions-apply-role} -``` - - - - - - -The resources that you need provisioned in Azure to start managing resources with Pipelines are: - -1. An Azure Resource Group for OpenTofu state resources - 1. An Azure Storage Account in that resource group for OpenTofu state storage - 1. An Azure Storage Container in that storage account for OpenTofu state storage -2. An Entra ID Application to use for plans - 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch - 2. A Service Principal for the application to be used in role assignments - 1. A role assignment for the service principal to access the Azure subscription - 2. A role assignment for the service principal to access the Azure Storage Account -3. An Entra ID Application to use for applies - 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch - 2. A Service Principal for the application to be used in role assignments - 1. A role assignment for the service principal to access the Azure subscription - -:::note - -This may seem like a lot of work, but it's a one-time step to onboard a new subscription for management with Pipelines. - -You'll receive all the IaC required to create this infrastructure soon, and the majority of the work will be to create a reusable catalog that makes it easy to bootstrap additional subscriptions in the future. - -::: - -The process that we'll follow to get these resources ready for Pipelines is: - -1. Set up the catalog of IaC configurations for bootstrapping a new subscription -2. Provision these bootstrap resources using Terragrunt -3. Pull the bootstrap resources into state, using the storage account we just provisioned - -### Setting up the catalog - -To start setting up the catalog, create the following files in a `catalog` directory you create in your `infrastructure-live` repository: - -```bash -mkdir -p catalog/{modules, units, stacks} -``` - -These subdirectories within the `catalog` directory will be used to store the reusable IaC configurations for bootstrapping each new subscription you want managed by Pipelines. - -:::tip - -We typically recommend using a separate repository for your Terragrunt infrastructure catalog, but we're using a single repository for simplicity in this guide. - -You're free to do that if you would prefer it. 
- -::: - -Each of the following files can be copied into the `catalog` directory, with the filename listed at the top of the code block. - -```hcl title="catalog/modules/entra-id-application/versions.tf" -terraform { - required_version = ">= 1.0.0" - required_providers { - azuread = { - source = "hashicorp/azuread" - version = "~> 3.6.0" - } - } -} -``` - -```hcl title="catalog/modules/entra-id-application/variables.tf" -variable "display_name" { - description = "The display name for the Entra ID application." - type = string -} - -variable "description" { - description = "The description for the Entra ID application." - type = string -} -``` - -```hcl title="catalog/modules/entra-id-application/main.tf" -resource "azuread_application" "app" { - display_name = var.display_name - description = var.description -} -``` - -```hcl title="catalog/modules/entra-id-application/outputs.tf" -output "id" { - description = "The ID of the Entra ID application." - value = azuread_application.app.id -} - -output "client_id" { - description = "The client ID of the Entra ID application." - value = azuread_application.app.client_id -} - -output "display_name" { - description = "The display name of the Entra ID application." - value = azuread_application.app.display_name -} -``` - -### Provisioning the resources - -### Pulling the resources into state - - - - -## Creating `.gruntwork` HCL configurations - -Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). - -For example: - -```hcl title=".gruntwork/repository.hcl" -repository { - deploy_branch_name = "main" -} -``` - - - - -```hcl title=".gruntwork/environment.hcl" -environment "dev" { - filter { - paths = ["dev/*"] - } - - authentication { - aws_oidc { - account_id = "123456789012" - plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" - apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" - } - } -} - -environment "prod" { - filter { - paths = ["prod/*"] - } - - authentication { - aws_oidc { - account_id = "987654321098" - plan_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-plan" - apply_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-apply" - } - } -} -``` - -:::tip - -Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. - -::: - -:::tip - -Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. 
- -::: - - - - -```hcl title=".gruntwork/environment.hcl" -environment "dev" { - filter { - paths = ["dev/*"] - } - - authentication { - azure_oidc { - tenant_id = "00000000-0000-0000-0000-000000000000" - subscription_id = "11111111-1111-1111-1111-111111111111" - - plan_client_id = "33333333-3333-3333-3333-333333333333" - apply_client_id = "44444444-4444-4444-4444-444444444444" - } - } -} - -environment "prod" { - filter { - paths = ["prod/*"] - } - - authentication { - azure_oidc { - tenant_id = "00000000-0000-0000-0000-000000000000" - subscription_id = "22222222-2222-2222-2222-222222222222" - - plan_client_id = "55555555-5555-5555-5555-555555555555" - apply_client_id = "66666666-6666-6666-6666-666666666666" - } - } -} -``` - -:::tip - -Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. - -::: - - - - -```hcl title=".gruntwork/environment.hcl" -environment "dev" { - filter { - paths = ["dev/*"] - } - - authentication { - custom { - auth_provider_cmd = "./scripts/custom-auth-dev.sh" - } - } -} - -environment "prod" { - filter { - paths = ["prod/*"] - } - - authentication { - custom { - auth_provider_cmd = "./scripts/custom-auth-prod.sh" - } - } -} -``` - -:::tip - -Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. - -::: - - - - -## Creating `.github/workflows/pipelines.yml` - -Create a `.github/workflows/pipelines.yml` file in the root of your `infrastructure-live` repository with the following content: - -```yaml title=".github/workflows/pipelines.yml" -name: Pipelines -run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}" -on: - push: - branches: - - main - paths-ignore: - - ".github/**" - pull_request: - types: - - opened - - synchronize - - reopened - paths-ignore: - - ".github/**" - -# Permissions to assume roles and create pull requests -permissions: - id-token: write - contents: write - pull-requests: write - -jobs: - GruntworkPipelines: - uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@main -``` - -:::tip - -You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwork-io/pipelines-workflows/blob/main/.github/workflows/pipelines.yml) to learn how this GitHub Actions workflow calls the Pipelines CLI to run your pipelines. - -::: - -## Commit and push your changes - -Commit and push your changes to your repository. - -:::note - -You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow. - -::: - -```bash -git add . -git commit -m "Add Pipelines GitHub Actions workflow [skip ci]" -git push -``` - -🚀 You've successfully added Gruntwork Pipelines to your new repository! - -## Next steps - -You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. 
diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx new file mode 100644 index 0000000000..b3bb0aca99 --- /dev/null +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -0,0 +1,607 @@ +# Initial Setup + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import PersistentCheckbox from '/src/components/PersistentCheckbox'; + +To configure Gruntwork Pipelines in a new GitHub repository, complete the following steps (which are explained in detail below): + +1. Create an `infrastructure-live` repository. +2. Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. +3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. +4. Create `.github/workflows/pipelines.yml` to tell your GitHub Actions workflow how to run your pipelines. +5. Commit and push your changes to your repository. + +## Creating the infrastructure-live repository + +Creating an `infrastructure-live` repository is fairly straightforward. First, create a new repository using the official GitHub documentation for [creating repositories](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-new-repository). Name the repository something like `infrastructure-live` and make it private (or internal). + +Clone the repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: + +For example: + +```bash +git clone git@github.com:acme/infrastructure-live.git +``` + +:::note Progress Checklist + + +::: + +Once the repository is cloned locally, you'll want to create a `.mise.toml` file in the root of the repository to tell Pipelines what versions of Terragrunt and OpenTofu to use. + +For example: + +```toml title=".mise.toml" +[tools] +terragrunt = "0.88.0" +opentofu = "1.10.6" +``` + +:::tip + +Follow the official [mise installation guide](https://mise.jdx.dev/getting-started.html) to install it locally. + +You can get `mise` to lookup the versions available for a given tool by using the `ls-remote` command. + +```bash +mise ls-remote terragrunt +mise ls-remote opentofu +``` + +::: + +Next, install Terragrunt and OpenTofu locally: + +```bash +mise install +``` + +:::note Progress Checklist + + + +::: + + + +## Configuring SCM Access + +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). + +There are two ways to configure SCM access for Pipelines: + +1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitLab users, and for GitHub users who cannot use the GitHub App). + +:::note Progress Checklist + + + +::: + +## Creating Cloud Resources for Pipelines + +To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. 
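Since the bootstrap steps below are run from your own machine, it can help to first confirm which identities your local cloud CLIs are using. This is an optional sanity check, assuming the relevant CLI is installed; `az account show` in particular prints the tenant and subscription IDs you will need again later when exporting `ARM_TENANT_ID` and `ARM_SUBSCRIPTION_ID`.

```bash
# Optional: confirm which identities your local CLIs are authenticated as.
aws sts get-caller-identity   # AWS: current account and principal
az account show               # Azure: current tenant and subscription
```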
+ +:::note + +If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. + +::: + +This guide will assume a blank slate, so you can start by creating a new Git repository to track the infrastructure that you're provisioning here. + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: + + + + +The resources that you need provisioned in AWS to start managing resources with Pipelines are: + +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands + +For every account you want Pipelines to manage infrastructure in. + +:::tip + +What follows is a guide for creating the basic, minimal set of resources required to get started. If you have access to Gruntwork's [Infrastructure Library](/2.0/docs/library/concepts/overview), you can use off the following off-the-shelf modules to do the work for you: + +- [OIDC Provider for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-openid-connect-provider/) +- [IAM Role for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-iam-role/) + +::: + +To get started, you can create the modules that you are going to provision for each of these in a `catalog/modules` directory. + +```bash +mkdir -p catalog/modules/{github-actions-oidc-provider, github-actions-iam-role} +``` + +You'll also want to create the scaffolding for the Terragrunt units you want to provision. + +```bash +mkdir -p live/acme/_global/{github-actions-oidc-provider, github-actions-plan-role, github-actions-apply-role} +``` + + + + +The resources that you need provisioned in Azure to start managing resources with Pipelines are: + +1. An Azure Resource Group for OpenTofu state resources + 1. An Azure Storage Account in that resource group for OpenTofu state storage + 1. An Azure Storage Container in that storage account for OpenTofu state storage +2. An Entra ID Application to use for plans + 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + 2. A role assignment for the service principal to access the Azure Storage Account +3. An Entra ID Application to use for applies + 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. 
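For example, you can clone the catalog locally to browse the stacks, units, and modules it provides before referencing it from your own configurations:

```bash
git clone git@github.com:gruntwork-io/terragrunt-scale-catalog.git
```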
+ +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository +2. Use Terragrunt to provision these resources in your Azure subscription +3. Pull the bootstrap resources into state, using the storage account we just provisioned + +### Bootstrap your `infrastructure-live` repository + +To bootstrap your Azure subscription for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: + +```hcl title="root.hcl" +generate "provider" { + path = "provider.tf" + if_exists = "overwrite" + contents = < + +::: + +This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers. + +```hcl title="name-of-subscription/sub.hcl +locals { + // This is the name of the resource group that will be created for state storage. + // + // You don't need to change this if you don't want to (and you don't already have a resource group named this). + state_resource_group_name = "pipelines-rg" + + // Make sure this is less than 24 characters, and only contains lowercase letters and numbers + // to obey Azure's naming requirements. + // + // You will need to change this. + state_storage_account_name = "name-of-storage-account-you-want-to-use-for-state" + + // This is the name of the container you'll use for state storage. + // + // You don't need to change this if you don't want to. + state_storage_container_name = "tfstate" +} +``` + +:::note Progress Checklist + + + + + + +::: + +This file is used by all units in the `name-of-subscription` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your subscription. + +```hcl title="name-of-subscription/bootstrap/terragrunt.stack.hcl" +locals { + // Read from parent configurations instead of defining these values locally + // so that other stacks and units in this directory can reuse the same configurations. + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) +} + +stack "bootstrap" { + source = "git@github.com:gruntwork-io/terragrunt-scale-catalog.git//stacks/azure/bootstrap?ref=v1.0.0" + path = "bootstrap" + + values = { + terragrunt_scale_catalog_url = "git@github.com:gruntwork-io/terragrunt-scale-catalog.git" + + // Set the location to the location you want to bootstrap your subscription in. + location = "East US" + + // Read from parent configurations instead of defining these values locally. + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name + + // Set the organization name you want to use for your subscription. + org_name = "acme" + + // Set the repository name you want to use for your subscription. + repo_name = "infrastructure-live" + + // Set the OIDC resource prefix you want to use for your subscription. + // + // This will be used to determine the names of the OIDC resources like the Entra ID Applications that are created. + // e.g. `pipelines`-plan, `pipelines`-apply, etc. 
+ oidc_resource_prefix = "pipelines" + } +} +``` + +:::note Progress Checklist + + + + + + + +::: + +You'll also want to make sure that you add the `azure` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with Azure for the bootstrapping process. + +```toml title=".mise.toml" +[tools] +# The Terragrunt and OpenTofu entries should already be present... +azure-cli = "2.77.0" +``` + +:::tip + +Remember that you can use `ls-remote` to list the available versions of the `azure-cli` tool. + +```bash +mise ls-remote azure-cli +``` + +::: + +Make sure to run `mise install` to install the `azure-cli` tool. + +```bash +mise install +``` + +If you haven't already, you'll want to authenticate with Azure using the `az` CLI. + +```bash +az login +``` + +:::note Progress Checklist + + + + +::: + +### Provisioning the resources + +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription. + +To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: + +- `ARM_TENANT_ID` +- `ARM_SUBSCRIPTION_ID` + +For example: + +```bash +export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" +export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" +``` + +:::note Progress Checklist + + + +::: + +First, make sure that everything is set up correctly by running a plan in the subscription directory. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process. + +::: + +Next, apply the changes to your subscription. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply +``` + +:::tip + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + +::: + +:::note Progress Checklist + + + +::: + +:::tip Troubleshooting Tips + +If you encounter issues during this step, please refer to the [Initial Apply Failure](#initial-apply-failure) section. + +::: + +### Pulling the resources into state + +Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +``` + +:::tip + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + +::: + +:::note Progress Checklist + + + +::: + + + + +## Creating `.gruntwork` HCL configurations + +Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). 
+ +For example: + +```hcl title=".gruntwork/repository.hcl" +repository { + deploy_branch_name = "main" +} +``` + + + + +```hcl title=".gruntwork/environment.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + aws_oidc { + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" + } + } +} + +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + aws_oidc { + account_id = "987654321098" + plan_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-apply" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. + +::: + +:::tip + +Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. + +::: + + + + +```hcl title=".gruntwork/environment.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "11111111-1111-1111-1111-111111111111" + + plan_client_id = "33333333-3333-3333-3333-333333333333" + apply_client_id = "44444444-4444-4444-4444-444444444444" + } + } +} + +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "22222222-2222-2222-2222-222222222222" + + plan_client_id = "55555555-5555-5555-5555-555555555555" + apply_client_id = "66666666-6666-6666-6666-666666666666" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. + +::: + + + + +```hcl title=".gruntwork/environment.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-dev.sh" + } + } +} + +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-prod.sh" + } + } +} +``` + +:::tip + +Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. + +::: + + + + +## Creating `.github/workflows/pipelines.yml` + +Create a `.github/workflows/pipelines.yml` file in the root of your `infrastructure-live` repository with the following content: + +```yaml title=".github/workflows/pipelines.yml" +name: Pipelines +run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}" +on: + push: + branches: + - main + paths-ignore: + - ".github/**" + pull_request: + types: + - opened + - synchronize + - reopened + paths-ignore: + - ".github/**" + +# Permissions to assume roles and create pull requests +permissions: + id-token: write + contents: write + pull-requests: write + +jobs: + GruntworkPipelines: + uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@main +``` + +:::tip + +You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwork-io/pipelines-workflows/blob/main/.github/workflows/pipelines.yml) to learn how this GitHub Actions workflow calls the Pipelines CLI to run your pipelines. 
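Note that the example above tracks `main`. If you prefer to control exactly when you pick up changes to the reusable workflow, GitHub Actions also lets you pin it to a tag or commit SHA; the `<ref>` below is a placeholder for whichever ref from the `pipelines-workflows` repository you have reviewed and chosen.

```yaml
jobs:
  GruntworkPipelines:
    # Pin to a reviewed tag or commit SHA instead of tracking main.
    uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@<ref>
```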
+ +::: + +## Commit and push your changes + +Commit and push your changes to your repository. + +:::note + +You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow. + +::: + +```bash +git add . +git commit -m "Add Pipelines GitHub Actions workflow [skip ci]" +git push +``` + +🚀 You've successfully added Gruntwork Pipelines to your new repository! + +## Next steps + +You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. + +## Troubleshooting Tips + +If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario: + +### Initial Apply Failure + +If your initial apply fails, follow these steps to troubleshoot the issue: + + + + + + + From b5b460f253bd3c90ed24e002c9755ee28c51c920 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Wed, 1 Oct 2025 15:07:32 -0400 Subject: [PATCH 14/39] fix: Fixing the checkbox ids --- .../pipelines/installation/addingnewrepo.mdx | 145 ++++++++++-------- 1 file changed, 83 insertions(+), 62 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index b3bb0aca99..41973d71d0 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -219,9 +219,9 @@ locals { :::note Progress Checklist - + - + ::: @@ -366,7 +366,7 @@ We're adding the `--no-stack-generate` flag here, as Terragrunt will already hav :::tip Troubleshooting Tips -If you encounter issues during this step, please refer to the [Initial Apply Failure](#initial-apply-failure) section. +If you encounter issues during this step, please refer to the [Initial Apply Failure](#azure-initial-apply-failure) section. ::: @@ -386,7 +386,7 @@ We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiti :::note Progress Checklist - + ::: @@ -397,7 +397,9 @@ We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiti Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). -For example: +### The `repository` block + +The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. ```hcl title=".gruntwork/repository.hcl" repository { @@ -405,13 +407,26 @@ repository { } ``` +:::note Progress Checklist + + + + +::: + +### The `environment` block + +Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). + +For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. 
You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. + -```hcl title=".gruntwork/environment.hcl" -environment "dev" { +```hcl title=".gruntwork/environment-an-aws-account.hcl" +environment "an_aws_account" { filter { - paths = ["dev/*"] + paths = ["an-aws-account/*"] } authentication { @@ -422,20 +437,6 @@ environment "dev" { } } } - -environment "prod" { - filter { - paths = ["prod/*"] - } - - authentication { - aws_oidc { - account_id = "987654321098" - plan_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-plan" - apply_iam_role_arn = "arn:aws:iam::987654321098:role/pipelines-apply" - } - } -} ``` :::tip @@ -450,13 +451,24 @@ Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-b ::: +:::note Progress Checklist + + + + + + + + +::: + -```hcl title=".gruntwork/environment.hcl" -environment "dev" { +```hcl title=".gruntwork/environment-an-azure-subscription.hcl" +environment "an_azure_subscription" { filter { - paths = ["dev/*"] + paths = ["an-azure-subscription/*"] } authentication { @@ -469,22 +481,6 @@ environment "dev" { } } } - -environment "prod" { - filter { - paths = ["prod/*"] - } - - authentication { - azure_oidc { - tenant_id = "00000000-0000-0000-0000-000000000000" - subscription_id = "22222222-2222-2222-2222-222222222222" - - plan_client_id = "55555555-5555-5555-5555-555555555555" - apply_client_id = "66666666-6666-6666-6666-666666666666" - } - } -} ``` :::tip @@ -493,10 +489,23 @@ Learn more about how Pipelines authenticates to Azure in the [Authenticating to ::: +:::note Progress Checklist + + + + + + + + + + +::: + -```hcl title=".gruntwork/environment.hcl" +```hcl title=".gruntwork/environment-dev.hcl" environment "dev" { filter { paths = ["dev/*"] @@ -508,18 +517,6 @@ environment "dev" { } } } - -environment "prod" { - filter { - paths = ["prod/*"] - } - - authentication { - custom { - auth_provider_cmd = "./scripts/custom-auth-prod.sh" - } - } -} ``` :::tip @@ -528,6 +525,17 @@ Learn more about how Pipelines can authenticate with custom authentication in th ::: +:::note Progress Checklist + + + + + + + + +::: + @@ -569,6 +577,13 @@ You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwor ::: +:::note Progress Checklist + + + + +::: + ## Commit and push your changes Commit and push your changes to your repository. @@ -585,6 +600,13 @@ git commit -m "Add Pipelines GitHub Actions workflow [skip ci]" git push ``` +:::note Progress Checklist + + + + +::: + 🚀 You've successfully added Gruntwork Pipelines to your new repository! ## Next steps @@ -593,15 +615,14 @@ You have successfully completed the installation of Gruntwork Pipelines in a new ## Troubleshooting Tips -If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario: +If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario. 
-### Initial Apply Failure +### Azure Initial Apply Failure If your initial apply fails, follow these steps to troubleshoot the issue: - - - - - - + + + + + From cc8121f783e260d1f0f6e715ae7fcd5a11bb3fba Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Wed, 1 Oct 2025 15:11:25 -0400 Subject: [PATCH 15/39] fix: Fixing up some paper cuts in the top-level setup & installation docs --- docs/2.0/docs/pipelines/installation/overview.md | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/overview.md b/docs/2.0/docs/pipelines/installation/overview.md index 1a2320dc2b..917751ccab 100644 --- a/docs/2.0/docs/pipelines/installation/overview.md +++ b/docs/2.0/docs/pipelines/installation/overview.md @@ -2,9 +2,9 @@ Pipelines integrates multiple technologies to deliver a comprehensive CI/CD solution. This guide outlines the available installation methods and their respective use cases. -## Installation as part of DevOps Foundations +## Installation as part of Account Factory -Customers using DevOps Foundations benefit from a guided setup process that includes the complete installation of Gruntwork Pipelines. This process is facilitated by a Gruntwork solutions engineer and includes the following steps: +Customers using Account Factory benefit from a guided setup process that includes the complete installation of Gruntwork Pipelines. This process is facilitated by a Gruntwork solutions engineer and includes the following steps: 1. Creating a new `infrastructure-live-root` repository from the [`infrastructure-live-root-template`](https://github.com/gruntwork-io/infrastructure-live-root-template) template. 2. (On GitHub) Installing the [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) on the `infrastructure-live-root` repository or across the entire organization. For detailed instructions, refer to [this guide](/2.0/docs/pipelines/installation/viagithubapp). @@ -14,9 +14,9 @@ Completing these steps results in a repository fully configured for automated in ## Installation via manual setup -For users not leveraging DevOps Foundations or needing Gruntwork Pipelines for a standalone repository with existing Terragrunt configurations, Gruntwork Pipelines can be installed as an independent GitHub Actions or GitLab pipelines workflow. +For users not leveraging Account Factory or needing Gruntwork Pipelines for a standalone repository with existing Terragrunt configurations, Gruntwork Pipelines can be installed as an independent GitHub Actions or GitLab pipelines workflow. -To learn more about this process, consult the documentation for [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo). +To learn more about this process, consult the documentation for [Adding Pipelines to a New Repository](/2.0/docs/pipelines/installation/addingnewrepo) or [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo). ## Platform differences @@ -29,15 +29,9 @@ For GitHub Actions, you have two authentication options: 1. [GitHub App Authentication](/2.0/docs/pipelines/installation/viagithubapp) (Recommended) 2. [Machine User Authentication](/2.0/docs/pipelines/installation/viamachineusers) -### GitLab CI/CD (Beta) +### GitLab CI/CD For GitLab CI/CD: 1. [Machine User Authentication](/2.0/docs/pipelines/installation/viamachineusers) is the only supported method 2. 
Contact Gruntwork support to authorize your GitLab groups - -:::note - - Account Factory features are not currently available on GitLab - - ::: \ No newline at end of file From e9e9aae1ad5dbf50ca6bffb9643c5d02f05efb11 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 10:00:49 -0400 Subject: [PATCH 16/39] fix: Fixing path to new prerequisites for Account Factory --- docs/2.0/docs/overview/getting-started/index.md | 2 +- docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/2.0/docs/overview/getting-started/index.md b/docs/2.0/docs/overview/getting-started/index.md index 4f3d5dca36..4d2a3fb34e 100644 --- a/docs/2.0/docs/overview/getting-started/index.md +++ b/docs/2.0/docs/overview/getting-started/index.md @@ -7,7 +7,7 @@ Create your Gruntwork account and invite your team members to access Gruntwork r -### Step 2: [Set up a Landing Zone](/2.0/docs/pipelines/installation/prerequisites/awslandingzone) +### Step 2: [Set up a Landing Zone](/2.0/docs/accountfactory/prerequisites/awslandingzone) Follow Gruntwork's AWS Landing Zone walkthrough to implement a best-practice multi-account setup, ready for use with DevOps Foundations. diff --git a/docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx b/docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx index e68dd8fc26..1f25ee7a27 100644 --- a/docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx +++ b/docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx @@ -19,7 +19,7 @@ Delegating infrastructure management might be necessary for reasons such as: For example, a repository with application code may need to build and push a container image to AWS ECR before deploying it to a Kubernetes cluster. -The following guide assumes you have completed the [Pipelines Setup & Installation](/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md). +The following guide assumes you have completed the [Pipelines Setup & Installation](/2.0/docs/accountfactory/prerequisites/awslandingzone). 
## Step 1 - Verify the delegated account setup From 2dbeed431ba9e91a31c190aa6a4c11e0d3906c82 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 10:01:06 -0400 Subject: [PATCH 17/39] chore: Making sure this is pinned to `v4` before I forget --- docs/2.0/docs/pipelines/installation/addingnewrepo.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 41973d71d0..aaef11659f 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -568,7 +568,7 @@ permissions: jobs: GruntworkPipelines: - uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@main + uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v4 ``` :::tip From b89c0dbba5feac33bc2fb841950b7383c8667c05 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 12:13:12 -0400 Subject: [PATCH 18/39] fix: Cleaning up Azure guide --- .../pipelines/installation/addingnewrepo.mdx | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index aaef11659f..a369fa7531 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -236,12 +236,10 @@ locals { } stack "bootstrap" { - source = "git@github.com:gruntwork-io/terragrunt-scale-catalog.git//stacks/azure/bootstrap?ref=v1.0.0" + source = "https://github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/github/pipelines-bootstrap?ref=v1.0.0" path = "bootstrap" values = { - terragrunt_scale_catalog_url = "git@github.com:gruntwork-io/terragrunt-scale-catalog.git" - // Set the location to the location you want to bootstrap your subscription in. location = "East US" @@ -250,11 +248,11 @@ stack "bootstrap" { state_storage_account_name = local.sub_hcl.locals.state_storage_account_name state_storage_container_name = local.sub_hcl.locals.state_storage_container_name - // Set the organization name you want to use for your subscription. - org_name = "acme" + // Set the organization name you want Azure to trust for OIDC. + github_org_name = "acme" - // Set the repository name you want to use for your subscription. - repo_name = "infrastructure-live" + // Set the repository name you want Azure to trust for OIDC. + github_repo_name = "infrastructure-live" // Set the OIDC resource prefix you want to use for your subscription. // @@ -269,8 +267,8 @@ stack "bootstrap" { - - + + ::: @@ -299,7 +297,7 @@ Make sure to run `mise install` to install the `azure-cli` tool. mise install ``` -If you haven't already, you'll want to authenticate with Azure using the `az` CLI. +If you haven't already, you'll want to authenticate to Azure using the `az` CLI. 
```bash az login From 5a0f658238ab1948cc5ced135bca3d49003d5116 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 15:10:53 -0400 Subject: [PATCH 19/39] docs: Adding AWS docs --- .../pipelines/installation/addingnewrepo.mdx | 221 +++++++++++++++++- 1 file changed, 210 insertions(+), 11 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index a369fa7531..8ee5972ce9 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -79,7 +79,7 @@ Pipelines needs the ability to interact with Source Control Management (SCM) pla There are two ways to configure SCM access for Pipelines: 1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). -2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitLab users, and for GitHub users who cannot use the GitHub App). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitHub users who cannot use the GitHub App). :::note Progress Checklist @@ -110,33 +110,222 @@ If you don't have Git installed, you can install it by following the official gu The resources that you need provisioned in AWS to start managing resources with Pipelines are: -1. An OpenID Connect (OIDC) provider -2. An IAM role for Pipelines to assume when running Terragrunt plan commands -3. An IAM role for Pipelines to assume when running Terragrunt apply commands +1. An S3 bucket for OpenTofu state storage +2. An OpenID Connect (OIDC) provider +3. An IAM role for Pipelines to assume when running Terragrunt plan commands +4. An IAM role for Pipelines to assume when running Terragrunt apply commands For every account you want Pipelines to manage infrastructure in. +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository +2. 
Use Terragrunt to provision these resources in your AWS account + +### Bootstrap your `infrastructure-live` repository + +To bootstrap your AWS account for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: + +```hcl title="root.hcl" +locals { + account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) + state_bucket_name = local.account_hcl.locals.state_bucket_name + + region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) + aws_region = local.region_hcl.locals.aws_region +} + +remote_state { + backend = "s3" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + bucket = local.state_bucket_name + region = local.aws_region + key = "${path_relative_to_include()}/tofu.tfstate" + encrypt = true + use_lockfile = true + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers and remote state configuration. + +```hcl title="name-of-account/account.hcl" +locals { + // This is the name of the S3 bucket that will be created for state storage. + // + // Make sure this is globally unique across all AWS accounts, as S3 bucket names must be globally unique. + // You will need to change this. + state_bucket_name = "your-unique-bucket-name-for-state" +} +``` + +:::note Progress Checklist + + + + + +::: + +This file is used by all units in the `name-of-account` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your account. + +```hcl title="name-of-account/_global/region.hcl" +locals { + aws_region = "us-east-1" +} +``` + :::tip -What follows is a guide for creating the basic, minimal set of resources required to get started. If you have access to Gruntwork's [Infrastructure Library](/2.0/docs/library/concepts/overview), you can use off the following off-the-shelf modules to do the work for you: +This region configuration is being set because the AWS API needs to make API calls to _some_ AWS region, but all the resources are, in fact, global. + +The AWS IAM service is a global service, which is why we're storing the bootstrap resources in the `_global` directory. + +::: + +:::note Progress Checklist -- [OIDC Provider for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-openid-connect-provider/) -- [IAM Role for GitHub Actions](/reference/modules/terraform-aws-security/github-actions-iam-role/) + + + ::: -To get started, you can create the modules that you are going to provision for each of these in a `catalog/modules` directory. +This file is used by all units in the `_global` directory to ensure that Terragrunt configurations know which AWS region to use for the OpenTofu AWS provider configuration. + +```hcl title="name-of-account/_global/bootstrap/terragrunt.stack.hcl" +locals { + // Read from parent configurations instead of defining these values locally + // so that other stacks and units in this directory can reuse the same configurations. 
+ account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) +} + +stack "bootstrap" { + source = "https://github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/github/pipelines-bootstrap?ref=v1.0.0" + path = "bootstrap" + + values = { + // Set the OIDC resource prefix you want to use for your account. + // + // This will be used to determine the names of the OIDC resources like the IAM roles that are created. + // e.g. `pipelines-plan`, `pipelines-apply`, etc. + oidc_resource_prefix = "pipelines" + + // Set the organization name you want AWS to trust for OIDC. + github_org_name = "acme" + + // Set the repository name you want AWS to trust for OIDC. + github_repo_name = "infrastructure-live" + + // Read from parent configurations instead of defining these values locally. + state_bucket_name = local.account_hcl.locals.state_bucket_name + } +} +``` + +:::note Progress Checklist + + + + + + + +::: + +You'll also want to make sure that you add the `aws` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with AWS for the bootstrapping process. + +```toml title=".mise.toml" +[tools] +# The Terragrunt and OpenTofu entries should already be present... +awscli = "2.22.0" +``` + +:::tip + +Remember that you can use `ls-remote` to list the available versions of the `awscli` tool. ```bash -mkdir -p catalog/modules/{github-actions-oidc-provider, github-actions-iam-role} +mise ls-remote awscli ``` -You'll also want to create the scaffolding for the Terragrunt units you want to provision. +::: + +Make sure to run `mise install` to install the `awscli` tool. ```bash -mkdir -p live/acme/_global/{github-actions-oidc-provider, github-actions-plan-role, github-actions-apply-role} +mise install +``` + +If you haven't already, you'll want to authenticate to AWS using the `aws` CLI. + +```bash +aws configure +``` + +:::note Progress Checklist + + + + +::: + +### Provisioning the resources + +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. + +First, make sure that everything is set up correctly by running a plan in the bootstrap directory. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive plan ``` +Next, apply the changes to your account. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive apply +``` + +:::note Progress Checklist + + + +::: + +:::tip Troubleshooting Tips + +If you encounter issues during this step, please refer to the [AWS Initial Apply Failure](#aws-initial-apply-failure) section. + +::: + @@ -615,6 +804,16 @@ You have successfully completed the installation of Gruntwork Pipelines in a new If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario. 
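Regardless of the scenario, re-running the failing step locally with more verbose logging is often the fastest way to narrow down the root cause. A minimal sketch, assuming you are authenticated to the affected account and working in the same bootstrap directory:

```bash title="name-of-account/_global/bootstrap"
# Re-run the failing step with debug logging to surface authentication and provider errors.
terragrunt run --all --non-interactive plan --log-level debug
```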
+### AWS Initial Apply Failure + +If your initial apply fails, follow these steps to troubleshoot the issue: + + + + + + + ### Azure Initial Apply Failure If your initial apply fails, follow these steps to troubleshoot the issue: From 5600d559a6fd20212ac7e54442a69c0d0d15534b Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 15:13:41 -0400 Subject: [PATCH 20/39] fix: Cleaning up language for sidebar on GitHub --- sidebars/docs.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sidebars/docs.js b/sidebars/docs.js index dc063efbfa..30c911d33f 100644 --- a/sidebars/docs.js +++ b/sidebars/docs.js @@ -257,17 +257,17 @@ const sidebar = [ collapsed: false, items: [ { - label: "Creating a New GitHub Repository with Pipelines", + label: "Bootstrap Pipelines in a New GitHub Repository", type: "doc", id: "2.0/docs/pipelines/installation/addingnewrepo", }, { - label: "Adding Pipelines to an Existing GitHub Repository", + label: "Bootstrap Pipelines in an Existing GitHub Repository", type: "doc", id: "2.0/docs/pipelines/installation/addingexistingrepo", }, { - label: "Adding Branch Protection to a Repository", + label: "Adding Branch Protection to a GitHub Repository", type: "doc", id: "2.0/docs/pipelines/installation/branch-protection", }, From f6486669ce15bd91604dc27e8eb4ff0f735bcd41 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 16:00:54 -0400 Subject: [PATCH 21/39] docs: WIP progress on adding Pipelines to an existing repo --- .../installation/addingexistingrepo.md | 553 ----------------- .../installation/addingexistingrepo.mdx | 577 ++++++++++++++++++ .../pipelines/installation/addingnewrepo.mdx | 15 +- 3 files changed, 584 insertions(+), 561 deletions(-) delete mode 100644 docs/2.0/docs/pipelines/installation/addingexistingrepo.md create mode 100644 docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.md b/docs/2.0/docs/pipelines/installation/addingexistingrepo.md deleted file mode 100644 index 9149fbd80c..0000000000 --- a/docs/2.0/docs/pipelines/installation/addingexistingrepo.md +++ /dev/null @@ -1,553 +0,0 @@ -import CustomizableValue from '/src/components/CustomizableValue'; - -# Adding Gruntwork Pipelines to an existing repository - -This guide provides instructions for installing Gruntwork Pipelines in a repository with existing IaC. This guide is for Gruntwork customers looking to integrate Pipelines into their existing repositories for streamlined infrastructure management. - -:::info - -This process leverages a new configuration paradigm for Pipelines called ["Pipelines Configuration as Code"](/2.0/reference/pipelines/configurations-as-code), introduced in July 2024. This system allows developers to use Gruntwork Pipelines with any folder structure in their IaC repositories. Previously, Pipelines required a specific folder layout to map source control directories to AWS Accounts for authentication. - -**As of Q4 2024, this new configuration system does not yet support the [Gruntwork Account Factory](https://docs.gruntwork.io/2.0/docs/accountfactory/concepts/).** If you need both Pipelines and the Account Factory, we recommend [starting with a new repository](/2.0/docs/pipelines/installation/addingnewrepo) or contacting [Gruntwork support](/support) for assistance. 
-::: - -## Prerequisites - -- **Active Gruntwork subscription**: Ensure your account includes access to Pipelines. Verify access by navigating to the "View team in GitHub" option in the [Gruntwork Developer Portal's account page](https://app.gruntwork.io/account) if you are an admin. From the GitHub team UI, search for "pipelines" under the repositories tab to confirm access. -- **AWS credentials**: You need credentials with permissions to create resources in the AWS account where Pipelines will be deployed. This includes creating an OpenID Connect (OIDC) Provider and AWS Identity and Access Management (IAM) roles for Pipelines to use when deploying infrastructure. - -## Setting up the repository - -### Account information - -Create an `accounts.yml` file in the root directory of your repository with the following content. Replace , , and with the appropriate values for the account you are deploying to. Add additional accounts as needed to manage them with Pipelines. - - ```yaml title="accounts.yml" - # required: Name of an account - $$AWS_ACCOUNT_NAME$$: - # required: The AWS account ID - id: "$$AWS_ACCOUNT_ID$$" - # required: The email address of the account owner - email: "$$AWS_ACCOUNT_EMAIL$$" - ``` - -### Pipelines configurations - -Create a file named `.gruntwork/gruntwork.hcl` in the root directory of your repository with the following content. This file is used to configure Pipelines for your repository. Update the specified placeholders with the appropriate values: - -- : Specify a name that represents the environment being deployed, such as `production`, `staging`, or `development`. -- : Define the root-relative path of the folder in your repository that contains the terragrunt units for the environment you are deploying to. This may be the same as the environment name if there is a directory in the root of the repository that contains all the terragrunt units for the environment. -- : Enter the AWS Account ID associated with the deployment of Terragrunt units for the specified environment. -- : Specify the branch name used for deployments, such as `main` or `master`. This branch will trigger the Pipelines apply workflow when changes are merged. Pull requests targeting this branch will trigger the Pipelines plan workflow. - - -```hcl title=".gruntwork/gruntwork.hcl" -# Configurations applicable to the entire repository https://docs.gruntwork.io/2.0/docs/pipelines/installation/addingexistingrepo#repository-blocks -repository { - deploy_branch_name = "$$DEPLOY_BRANCH_NAME$$" -} - -aws { - accounts "all" { - // Reading the accounts.yml file from the root of the repository - path = "../accounts.yml" - } -} - -# Configurations that are applicable to a specific environment within a repository # https://docs.gruntwork.io/2.0/docs/pipelines/installation/addingexistingrepo#environment-blocks -environment "$$ENVIRONMENT_NAME$$" { - filter { - paths = ["$$PATH_TO_ENVIRONMENT$$/*"] - } - - authentication { - aws_oidc { - account_id = aws.accounts.all.$$AWS_ACCOUNT_NAME$$.id - plan_iam_role_arn = "arn:aws:iam::${aws.accounts.all.$$AWS_ACCOUNT_NAME$$.id}:role/pipelines-plan" - apply_iam_role_arn = "arn:aws:iam::${aws.accounts.all.$$AWS_ACCOUNT_NAME$$.id}:role/pipelines-apply" - } - } -} -``` - -The IAM roles mentioned in the unit configuration above will be created in the [Pipelines OpenID Connect (OIDC) Provider and Roles](#pipelines-openid-connectoidc-provider-and-roles) section. 
- -For additional environments, you can add new [environment configurations](/2.0/reference/pipelines/configurations-as-code#environment-configurations). Alternatively, consider using [unit configuration](/2.0/reference/pipelines/configurations-as-code#unit-configurations) for Terragrunt units in your repository that do not align with an environment configuration. - -### Pipelines GitHub Actions (GHA) workflow - -Pipelines is implemented using a GitHub [reusable workflow](https://docs.github.com/en/actions/sharing-automations/reusing-workflows#creating-a-reusable-workflow). The actual code for Pipelines and its features resides in an external repository, typically [Gruntwork's Pipelines Workflows repository](https://github.com/gruntwork-io/pipelines-workflows/). Your repository references this external workflow rather than containing the implementation itself. - -Create a file named `.github/workflows/pipelines.yml` in the root of your repository with the following content: - -
-Pipelines GHA workflow file - -```yaml title=".github/workflows/pipelines.yml" -###################################################################################################################### -# INFRASTRUCTURE CI/CD CONFIGURATION -# -# This file configures GitHub Actions to implement a CI/CD pipeline for managing infrastructure code. -# -# The pipeline defined in this configuration includes the following steps: -# -# - For any commit on any branch, identify all Terragrunt modules that have changed between the `HEAD` of the branch and -# `main`, and run `terragrunt plan` on each of those modules. -# - For commits to `main`, execute `terragrunt apply` on each of the updated modules. -# -###################################################################################################################### - -name: Pipelines -run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}" -on: - push: - branches: - - $$DEPLOY_BRANCH_NAME$$ - paths-ignore: - # Workflow does not run only if ALL filepaths match the pattern. See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#example-excluding-paths - - ".github/**" - pull_request: - types: - - opened - - synchronize - - reopened - -# Permissions to assume roles and create pull requests -permissions: - id-token: write - -jobs: - GruntworkPipelines: - # https://github.com/gruntwork-io/pipelines-workflows/blob/v3/.github/workflows/pipelines.yml - uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v3 - secrets: - PIPELINES_READ_TOKEN: ${{ secrets.PIPELINES_READ_TOKEN }} - - PipelinesPassed: - needs: GruntworkPipelines - if: always() - runs-on: ubuntu-latest - steps: - - run: | - echo "::debug::RESULT: $RESULT" - if [[ $RESULT = "success" ]]; then - echo "GruntworkPipelines completed successfully!" - else - echo "GruntworkPipelines failed!" - exit 1 - fi - env: - RESULT: ${{ needs.GruntworkPipelines.result }} -``` - -

### Pipelines OpenID Connect (OIDC) provider and roles

This step involves creating the Infrastructure as Code (IaC) configuration for the [OIDC](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) roles required by Pipelines to deploy infrastructure.

Two roles are needed:
- `pipelines-plan` for plans
- `pipelines-apply` for applies

Using two distinct roles upholds the principle of least privilege. The `pipelines-plan` role is used during pull request creation or updates and requires primarily read-only permissions. The `pipelines-apply` role, used during pull request merges, requires read/write permissions. Additionally, these roles have different IAM trust policies. The `apply` role only trusts the deploy branch, while the `plan` role trusts all branches.

This step requires AWS credentials with sufficient permissions to create the necessary IAM resources that Pipelines will assume when deploying infrastructure.

#### Create the Terragrunt units

Within the **$$PATH_TO_ENVIRONMENT$$** directory, create the Terragrunt unit files as described below, updating the following values as needed:

- $$AWS_STATE_BUCKET_PATTERN$$: Specify the state bucket name or pattern of the state bucket(s) to be used for the environment. The Pipelines roles must have permissions to access the state bucket for storing and retrieving state files.
- $$AWS_DYNAMO_DB_TABLE$$: Specify the name of the DynamoDB table used for state locking.
- $$INFRASTRUCTURE_LIVE_REPO_NAME$$: Provide the exact name of the repository where Pipelines is being configured.

<details>
-OIDC Provider - -```hcl title="$$PATH_TO_ENVIRONMENT$$/_global/github-actions-openid-connect-provider/terragrunt.hcl" -terraform { - source = "git@github.com:gruntwork-io/terraform-aws-security.git//modules/github-actions-openid-connect-provider?ref=v0.74.5" -} - -# Include the root `terragrunt.hcl` configuration, which has settings common across all environments & components. -include "root" { - path = find_in_parent_folders() -} - -inputs = { - allowed_organizations = [ - "$$GITHUB_ORG_NAME$$", - ] -} -``` - -
- -
-Pipelines Plan - -```hcl title="$$PATH_TO_ENVIRONMENT$$/_global/pipelines-plan-role/terragrunt.hcl" -terraform { - source = "git@github.com:gruntwork-io/terraform-aws-security.git//modules/github-actions-iam-role?ref=v0.74.5" -} - -# Include the root `terragrunt.hcl` configuration, which has settings common across all environments & components. -include "root" { - path = find_in_parent_folders() -} - -# The OIDC IAM roles for GitHub Actions require an IAM OpenID Connect (OIDC) Provider to be provisioned for each account. -# The underlying module used in `envcommon` is capable of creating the OIDC provider. Since multiple OIDC roles are required, -# a dedicated module is used, and all roles depend on its output -dependency "github-actions-openid-connect-provider" { - config_path = "../github-actions-openid-connect-provider" - - # Configure mock outputs for the `validate` command that are returned when there are no outputs available (e.g the - # module hasn't been applied yet. - mock_outputs_allowed_terraform_commands = ["validate", "plan"] - mock_outputs_merge_strategy_with_state = "shallow" - mock_outputs = { - arn = "known_after_apply" - url = "token.actions.githubusercontent.com" - } -} - -locals { - state_bucket_pattern = lower("$$AWS_STATE_BUCKET_PATTERN$$") -} - -inputs = { - github_actions_openid_connect_provider_arn = dependency.github-actions-openid-connect-provider.outputs.arn - github_actions_openid_connect_provider_url = dependency.github-actions-openid-connect-provider.outputs.url - - allowed_sources_condition_operator = "StringLike" - - allowed_sources = { - "$$GITHUB_ORG_NAME$$/$$INFRASTRUCTURE_LIVE_REPO_NAME$$" : ["*"] - } - - custom_iam_policy_name = "pipelines-plan-oidc-policy" - iam_role_name = "pipelines-plan" - - # Policy based on these docs: - # https://terragrunt.gruntwork.io/docs/features/aws-auth/#aws-iam-policies - iam_policy = { - # State permissions - "DynamoDBLocksTableAccess" = { - effect = "Allow" - actions = [ - "dynamodb:PutItem", - "dynamodb:GetItem", - "dynamodb:DescribeTable", - "dynamodb:DeleteItem", - "dynamodb:CreateTable", - ] - resources = ["arn:aws:dynamodb:*:*:table/$$AWS_DYNAMO_DB_TABLE$$"] - } - "S3StateBucketAccess" = { - effect = "Allow" - actions = [ - "s3:ListBucket", - "s3:GetBucketVersioning", - "s3:GetBucketAcl", - "s3:GetBucketLogging", - "s3:CreateBucket", - "s3:PutBucketPublicAccessBlock", - "s3:PutBucketTagging", - "s3:PutBucketPolicy", - "s3:PutBucketVersioning", - "s3:PutEncryptionConfiguration", - "s3:PutBucketAcl", - "s3:PutBucketLogging", - "s3:GetEncryptionConfiguration", - "s3:GetBucketPolicy", - "s3:GetBucketPublicAccessBlock", - "s3:PutLifecycleConfiguration", - "s3:PutBucketOwnershipControls", - ] - resources = [ - "arn:aws:s3:::${local.state_bucket_pattern}", - ] - } - "S3StateBucketObjectAccess" = { - effect = "Allow" - actions = [ - "s3:PutObject", - "s3:GetObject" - ] - resources = [ - "arn:aws:s3:::${local.state_bucket_pattern}/*", - ] - } - } -} -``` - -
- -
-Pipelines Apply - - - -```hcl title="$$PATH_TO_ENVIRONMENT$$/_global/pipelines-apply-role/terragrunt.hcl" -terraform { - source = "git@github.com:gruntwork-io/terraform-aws-security.git//modules/github-actions-iam-role?ref=v0.74.5" -} - -# Include the root `terragrunt.hcl` configuration, which has settings common across all environments & components. -include "root" { - path = find_in_parent_folders() -} - -# The OIDC IAM roles for GitHub Actions require an IAM OpenID Connect (OIDC) Provider to be provisioned for each account. -# The underlying module used in `envcommon` is capable of creating the OIDC provider. Since multiple OIDC roles are required, -# a dedicated module is used, and all roles depend on its output. -dependency "github-actions-openid-connect-provider" { - config_path = "../github-actions-openid-connect-provider" - - # Configure mock outputs for the `validate` command that are returned when there are no outputs available (e.g the - # module hasn't been applied yet. - mock_outputs_allowed_terraform_commands = ["validate", "plan"] - mock_outputs_merge_strategy_with_state = "shallow" - mock_outputs = { - arn = "known_after_apply" - url = "token.actions.githubusercontent.com" - } -} - -locals { - # Automatically load account-level variables - state_bucket_pattern = lower("$$AWS_STATE_BUCKET_PATTERN$$") -} - -inputs = { - github_actions_openid_connect_provider_arn = dependency.github-actions-openid-connect-provider.outputs.arn - github_actions_openid_connect_provider_url = dependency.github-actions-openid-connect-provider.outputs.url - - allowed_sources = { - "$$GITHUB_ORG_NAME$$/$$INFRASTRUCTURE_LIVE_REPO_NAME$$" : ["$$DEPLOY_BRANCH_NAME$$"] - } - - # Policy for OIDC role assumed from GitHub in the "$$GITHUB_ORG_NAME$$/$$INFRASTRUCTURE_LIVE_REPO_NAME$$" repo - custom_iam_policy_name = "pipelines-apply-oidc-policy" - iam_role_name = "pipelines-apply" - - # Policy based on these docs: - # https://terragrunt.gruntwork.io/docs/features/aws-auth/#aws-iam-policies - iam_policy = { - "IamPassRole" = { - resources = ["*"] - actions = ["iam:*"] - effect = "Allow" - } - "IamCreateRole" = { - resources = [ - "arn:aws:iam::*:role/aws-service-role/orgsdatasync.servicecatalog.amazonaws.com/AWSServiceRoleForServiceCatalogOrgsDataSync" - ] - actions = ["iam:CreateServiceLinkedRole"] - effect = "Allow" - } - "S3BucketAccess" = { - resources = ["*"] - actions = ["s3:*"] - effect = "Allow" - } - "DynamoDBLocksTableAccess" = { - resources = ["arn:aws:dynamodb:*:*:table/terraform-locks"] - actions = ["dynamodb:*"] - effect = "Allow" - } - "OrganizationsDeployAccess" = { - resources = ["*"] - actions = ["organizations:*"] - effect = "Allow" - } - "ControlTowerDeployAccess" = { - resources = ["*"] - actions = ["controltower:*"] - effect = "Allow" - } - "IdentityCenterDeployAccess" = { - resources = ["*"] - actions = ["sso:*", "ds:*", "sso-directory:*"] - effect = "Allow" - } - "ECSDeployAccess" = { - resources = ["*"] - actions = ["ecs:*"] - effect = "Allow" - } - "ACMDeployAccess" = { - resources = ["*"] - actions = ["acm:*"] - effect = "Allow" - } - "AutoScalingDeployAccess" = { - resources = ["*"] - actions = ["autoscaling:*"] - effect = "Allow" - } - "CloudTrailDeployAccess" = { - resources = ["*"] - actions = ["cloudtrail:*"] - effect = "Allow" - } - "CloudWatchDeployAccess" = { - resources = ["*"] - actions = ["cloudwatch:*", "logs:*"] - effect = "Allow" - } - "CloudFrontDeployAccess" = { - resources = ["*"] - actions = ["cloudfront:*"] - effect = "Allow" - } - "ConfigDeployAccess" = { - 
resources = ["*"] - actions = ["config:*"] - effect = "Allow" - } - "EC2DeployAccess" = { - resources = ["*"] - actions = ["ec2:*"] - effect = "Allow" - } - "ECRDeployAccess" = { - resources = ["*"] - actions = ["ecr:*"] - effect = "Allow" - } - "ELBDeployAccess" = { - resources = ["*"] - actions = ["elasticloadbalancing:*"] - effect = "Allow" - } - "GuardDutyDeployAccess" = { - resources = ["*"] - actions = ["guardduty:*"] - effect = "Allow" - } - "IAMDeployAccess" = { - resources = ["*"] - actions = ["iam:*", "access-analyzer:*"] - effect = "Allow" - } - "KMSDeployAccess" = { - resources = ["*"] - actions = ["kms:*"] - effect = "Allow" - } - "LambdaDeployAccess" = { - resources = ["*"] - actions = ["lambda:*"] - effect = "Allow" - } - "Route53DeployAccess" = { - resources = ["*"] - actions = ["route53:*", "route53domains:*", "route53resolver:*"] - effect = "Allow" - } - "SecretsManagerDeployAccess" = { - resources = ["*"] - actions = ["secretsmanager:*"] - effect = "Allow" - } - "SNSDeployAccess" = { - resources = ["*"] - actions = ["sns:*"] - effect = "Allow" - } - "SQSDeployAccess" = { - resources = ["*"] - actions = ["sqs:*"] - effect = "Allow" - } - "SecurityHubDeployAccess" = { - resources = ["*"] - actions = ["securityhub:*"] - effect = "Allow" - } - "MacieDeployAccess" = { - resources = ["*"] - actions = ["macie2:*"] - effect = "Allow" - } - "ServiceQuotaDeployAccess" = { - resources = ["*"] - actions = ["servicequotas:*"] - effect = "Allow" - } - "EKSAccess" = { - resources = ["*"] - actions = ["eks:*"] - effect = "Allow" - } - "EventBridgeAccess" = { - resources = ["*"] - actions = ["events:*"] - effect = "Allow" - } - "ApplicationAutoScalingAccess" = { - resources = ["*"] - actions = ["application-autoscaling:*"] - effect = "Allow" - } - "ApiGatewayAccess" = { - resources = ["*"] - actions = ["apigateway:*"] - effect = "Allow" - } - } -} -``` - - - -
- - -:::tip - -The permissions in the files above are provided as examples and should be adjusted to align with the specific types of infrastructure managed in the repository. This ensures that Pipelines can execute the required actions to deploy your infrastructure effectively. - -Additionally, note that the IAM permissions outlined above do not include permissions to modify the role itself, for security purposes. - -::: - -Repeat this step for each environment you would like to manage with Pipelines. - -#### Create the OIDC resources - -Use your personal AWS access to execute the following commands to deploy the infrastructure for the Terragrunt units created in the previous step. Repeat this process for each account you plan to manage with Pipelines. - - ```bash - cd $$PATH_TO_ENVIRONMENT$$/_global - terragrunt run-all plan - ``` - -Review the plan output, and if everything appears correct, proceed to apply the changes. - - - ```bash - terragrunt run-all apply - ``` - -:::tip - -If you encounter issues with the plan or apply steps due to the presence of other resources in the *_global* folder, you can run the plan/apply steps individually for the Terragrunt units. Start with the `github-actions-openid-connect-provider` unit, as other units depend on it. - -::: - -#### Commit and push the changes - -Create a new branch and commit all changes, including **`[skip ci]`** in the commit message to prevent triggering the Pipelines workflow. Push the changes to the repository, create a Pull Request, and merge the changes into the branch specified in the `.github/workflows/pipelines.yml` file. - -## Enable GitHub authentication for pipelines - -Follow the instructions in [Authenticating via GitHub App](/2.0/docs/pipelines/installation/viagithubapp) to enable GitHub authentication for Pipelines in your repository using the Gruntwork.io GitHub App. This is the recommended authentication method. Alternatively, you can [Authenticate via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) if preferred. - -## Next steps - -You have successfully completed the installation of Gruntwork Pipelines in an existing repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx new file mode 100644 index 0000000000..4a09cc1954 --- /dev/null +++ b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx @@ -0,0 +1,577 @@ +# Bootstrap Pipelines in an Existing Repository + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import PersistentCheckbox from '/src/components/PersistentCheckbox'; + +This guide provides comprehensive instructions for integrating [Gruntwork Pipelines](https://gruntwork.io/products/pipelines/) into an existing repository with Infrastructure as Code (IaC). This is designed for Gruntwork customers who want to add Pipelines to their current infrastructure repositories for streamlined CI/CD management. + +To configure Gruntwork Pipelines in an existing repository, complete the following steps (which are explained in detail below): + +1. **Plan your Pipelines setup** by identifying all environments and cloud accounts/subscriptions you need to manage. +2. **Bootstrap core infrastructure** in accounts/subscriptions that don't already have the required OIDC and state management resources. +3. 
**Configure SCM access** using either the [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) or [machine users](https://docs.github.com/en/get-started/learning-about-github/types-of-github-accounts#user-accounts). +4. **Create `.gruntwork` HCL configurations** to tell Pipelines how to authenticate and organize your environments. +5. **Create `.github/workflows/pipelines.yml`** to configure your GitHub Actions workflow. +6. **Commit and push** your changes to activate Pipelines. + +## Prerequisites + +Before starting, ensure you have: + +- **An active Gruntwork subscription** with Pipelines access. Verify by checking the [Gruntwork Developer Portal](https://app.gruntwork.io/account) and confirming access to "pipelines" repositories in your GitHub team. +- **Cloud provider credentials** with permissions to create OIDC providers and IAM roles in accounts where Pipelines will manage infrastructure. +- **Git installed** locally for cloning and managing your repository. +- **Existing IaC repository** with Terragrunt configurations you want to manage with Pipelines (if you are using OpenTofu/Terraform, and want to start using Terragrunt, read the [Quickstart Guide](https://terragrunt.gruntwork.io/docs/getting-started/quick-start)). + +## Planning Your Pipelines Setup + +Before implementing Pipelines, it's crucial to plan your setup by identifying all the environments and cloud resources you need to manage. + +### Identify Your Environments + +Review your existing repository structure and identify: + +1. **All environments** you want to manage with Pipelines (e.g., `dev`, `staging`, `prod`) +2. **Cloud accounts/subscriptions** associated with each environment +3. **Directory paths** in your repository that contain Terragrunt units for each environment +4. **Existing OIDC resources** that may already be provisioned in your accounts + +:::note Progress Checklist + + + + + + +::: + +### Determine Required OIDC Roles + +For each AWS Account / Azure Subscription you want to manage, you might already have some or all of the following resources provisioned. + + + + +**Required AWS Resources:** + +- An OIDC provider for GitHub Actions +- An IAM role for Pipelines to assume when running Terragrunt plan commands +- An IAM role for Pipelines to assume when running Terragrunt apply commands + + + + +**Required Azure Resources:** + +- Entra ID Application for plans with Federated Identity Credential +- Entra ID Application for applies with Federated Identity Credential +- Service Principals with appropriate role assignments +- Storage Account and Container for Terragrunt state storage (if not already existing) + + + + +:::note Progress Checklist + + + + +::: + +## Configuring SCM Access + +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). + +There are two ways to configure SCM access for Pipelines: + +1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitHub users who cannot use the GitHub App). + +:::note Progress Checklist + + + +::: + +## Bootstrapping Core Infrastructure + +If your cloud accounts/subscriptions don't already have all the required OIDC and state management resources, you'll need to bootstrap them. This section provides the infrastructure code needed to set up these resources. 
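If you are not sure whether an account or subscription already has these resources, a quick check with the cloud CLIs can help. The commands below are a minimal sketch, assuming you are already authenticated to the target AWS account or Azure subscription and that any existing bootstrap used the default `pipelines` resource prefix:

```bash
# AWS: list IAM OIDC providers in the current account and look for
# token.actions.githubusercontent.com, the provider used by GitHub Actions.
aws iam list-open-id-connect-providers

# AWS: check whether the Pipelines IAM roles already exist.
aws iam get-role --role-name pipelines-plan
aws iam get-role --role-name pipelines-apply

# Azure: list Entra ID app registrations whose display names match the prefix
# (e.g. pipelines-plan and pipelines-apply).
az ad app list --display-name pipelines --query "[].displayName" --output table
```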
+ +:::tip + +If you already have all the resources listed, you can skip this section. + +If you have some of them provisioned, but not all, you can decide to either destroy the resources you already have provisioned and recreate them or import them into state. If you are not sure, please contact [Gruntwork support](/support). + +::: + + + + +### AWS Bootstrap Resources + +The resources you need provisioned in AWS to start managing resources with Pipelines are: + +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands + +For every account you want Pipelines to manage infrastructure in. + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +### Bootstrap Your Repository for AWS + +First, confirm that you have a `root.hcl` file in the root of your repository that looks something like this: + +```hcl title="root.hcl" +locals { + account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) + state_bucket_name = local.account_hcl.locals.state_bucket_name + + region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) + aws_region = local.region_hcl.locals.aws_region +} + +remote_state { + backend = "s3" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + bucket = local.state_bucket_name + region = local.aws_region + key = "${path_relative_to_include()}/tofu.tfstate" + encrypt = true + use_lockfile = true + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + + + +::: + +### Provision AWS Bootstrap Resources + +For each account that needs bootstrapping: + +1. Navigate to the bootstrap directory: + + ```bash + cd /_global/bootstrap + ``` + +2. Plan the bootstrap resources: + + ```bash + terragrunt run --all --non-interactive plan + ``` + +3. Apply the bootstrap resources: + + ```bash + terragrunt run --all --non-interactive apply + ``` + +:::note Progress Checklist + + + + +::: + + + + +### Azure Bootstrap Resources + +The resources you need provisioned in Azure to start managing resources with Pipelines are: + +1. An Azure Resource Group for OpenTofu state resources +2. An Azure Storage Account and Container for OpenTofu state storage +3. Entra ID Applications for plan and apply operations +4. Federated Identity Credentials for OIDC authentication +5. Service Principals with appropriate role assignments + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. 
+ +::: + +### Bootstrap Your Repository for Azure + +For each Azure subscription that needs bootstrapping, create the following structure: + +```hcl title="/bootstrap/terragrunt.stack.hcl" +stack "bootstrap" { + source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/github/pipelines-bootstrap?ref=v1.0.0" + path = "bootstrap" + + values = { + # Set the location for your resources + location = "East US" + + # State storage configuration + state_resource_group_name = "pipelines-rg" + state_storage_account_name = "your-unique-storage-account" + state_storage_container_name = "tfstate" + + # OIDC configuration + github_org_name = "your-github-org" + github_repo_name = "your-repo-name" + oidc_resource_prefix = "pipelines" + } +} +``` + +:::note Progress Checklist + + + + + +::: + +### Provision Azure Bootstrap Resources + +For each subscription that needs bootstrapping: + +1. Set environment variables: + + ```bash + export ARM_TENANT_ID="your-tenant-id" + export ARM_SUBSCRIPTION_ID="your-subscription-id" + ``` + +2. Navigate to the bootstrap directory: + + ```bash + cd /bootstrap + ``` + +3. Plan the bootstrap resources: + + ```bash + terragrunt run --all --non-interactive --provider-cache plan + ``` + +4. Apply the bootstrap resources: + + ```bash + terragrunt run --all --non-interactive --provider-cache apply + ``` + +5. Migrate state to remote storage: + + ```bash + terragrunt run --all --non-interactive --provider-cache -- init -migrate-state -force-copy + ``` + +:::note Progress Checklist + + + + + + +::: + + + + +## Creating `.gruntwork` HCL Configurations + +Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). + +### The `repository` block + +The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. + +```hcl title=".gruntwork/repository.hcl" +repository { + deploy_branch_name = "main" +} +``` + +:::note Progress Checklist + + + + +::: + +### The `environment` block + +Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). + +For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. + + + + +```hcl title=".gruntwork/environment-production.hcl" +environment "production" { + filter { + paths = ["prod/*"] + } + + authentication { + aws_oidc { + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. 
+ +::: + +:::note Progress Checklist + + + + + + + +::: + + + + +```hcl title=".gruntwork/environment-production.hcl" +environment "production" { + filter { + paths = ["prod/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "11111111-1111-1111-1111-111111111111" + + plan_client_id = "33333333-3333-3333-3333-333333333333" + apply_client_id = "44444444-4444-4444-4444-444444444444" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. + +::: + +:::note Progress Checklist + + + + + + + + +::: + + + + +```hcl title=".gruntwork/environment-production.hcl" +environment "production" { + filter { + paths = ["prod/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-prod.sh" + } + } +} +``` + +:::tip + +Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. + +::: + +:::note Progress Checklist + + + + + + + +::: + + + + +## Creating `.github/workflows/pipelines.yml` + +Create a `.github/workflows/pipelines.yml` file in the root of your repository with the following content: + +```yaml title=".github/workflows/pipelines.yml" +name: Pipelines +run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}" +on: + push: + branches: + - main + paths-ignore: + - ".github/**" + pull_request: + types: + - opened + - synchronize + - reopened + paths-ignore: + - ".github/**" + +# Permissions to assume roles and create pull requests +permissions: + id-token: write + contents: write + pull-requests: write + +jobs: + GruntworkPipelines: + uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v4 +``` + +:::tip + +You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwork-io/pipelines-workflows/blob/main/.github/workflows/pipelines.yml) to learn how this GitHub Actions workflow calls the Pipelines CLI to run your pipelines. + +::: + +:::note Progress Checklist + + + + +::: + +## Commit and Push Your Changes + +Commit and push your changes to your repository. + +:::note + +You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow before everything is properly configured. + +::: + +```bash +git add . +git commit -m "Add Pipelines configurations and GitHub Actions workflow [skip ci]" +git push +``` + +:::note Progress Checklist + + + + +::: + +🚀 You've successfully added Gruntwork Pipelines to your existing repository! + +## Next Steps + +You have successfully completed the installation of Gruntwork Pipelines in an existing repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. 
+ +## Troubleshooting Tips + +If you encounter issues during the setup process, here are some common troubleshooting steps: + +### Bootstrap Resources Failure + +If your bootstrap resource provisioning fails: + + + + + + +### HCL Configuration Issues + +If your HCL configurations aren't working as expected: + + + + + +### GitHub Actions Workflow Issues + +If your GitHub Actions workflow isn't triggering correctly: + + + + diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 8ee5972ce9..982f67f844 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -1,4 +1,4 @@ -# Initial Setup +# Bootstrap Pipelines in a New GitHub Repository import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -110,10 +110,9 @@ If you don't have Git installed, you can install it by following the official gu The resources that you need provisioned in AWS to start managing resources with Pipelines are: -1. An S3 bucket for OpenTofu state storage -2. An OpenID Connect (OIDC) provider -3. An IAM role for Pipelines to assume when running Terragrunt plan commands -4. An IAM role for Pipelines to assume when running Terragrunt apply commands +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands For every account you want Pipelines to manage infrastructure in. @@ -229,7 +228,7 @@ locals { } stack "bootstrap" { - source = "https://github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/github/pipelines-bootstrap?ref=v1.0.0" + source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/github/pipelines-bootstrap?ref=v1.0.0" path = "bootstrap" values = { @@ -266,7 +265,7 @@ You'll also want to make sure that you add the `aws` CLI to your `.mise.toml` fi ```toml title=".mise.toml" [tools] # The Terragrunt and OpenTofu entries should already be present... -awscli = "2.22.0" +awscli = "2.31.6" ``` :::tip @@ -425,7 +424,7 @@ locals { } stack "bootstrap" { - source = "https://github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/github/pipelines-bootstrap?ref=v1.0.0" + source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/github/pipelines-bootstrap?ref=v1.0.0" path = "bootstrap" values = { From 23e29c3f26ca10c090899c787a71cd3352ca795c Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:07:34 -0400 Subject: [PATCH 22/39] docs: More troubleshooting guidance --- .../installation/addingexistingrepo.mdx | 46 ++++++++++++++----- .../pipelines/installation/addingnewrepo.mdx | 10 +++- 2 files changed, 43 insertions(+), 13 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx index 4a09cc1954..3baf0e8a58 100644 --- a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx @@ -285,8 +285,9 @@ stack "bootstrap" { :::note Progress Checklist + - + ::: @@ -294,7 +295,7 @@ stack "bootstrap" { For each subscription that needs bootstrapping: -1. Set environment variables: +1. 
Set the environment variables used by the Azure provider to authenticate in a given subscription: ```bash export ARM_TENANT_ID="your-tenant-id" @@ -313,6 +314,12 @@ For each subscription that needs bootstrapping: terragrunt run --all --non-interactive --provider-cache plan ``` + :::tip + + We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + + ::: + 4. Apply the bootstrap resources: ```bash @@ -325,10 +332,16 @@ For each subscription that needs bootstrapping: terragrunt run --all --non-interactive --provider-cache -- init -migrate-state -force-copy ``` + :::tip + + We're using the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + + ::: + :::note Progress Checklist - + @@ -375,8 +388,8 @@ environment "production" { authentication { aws_oidc { - account_id = "123456789012" - plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" } } @@ -389,6 +402,12 @@ Learn more about how Pipelines authenticates to AWS in the [Authenticating to AW ::: +:::tip + +Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to reuse common AWS configurations. + +::: + :::note Progress Checklist @@ -514,7 +533,7 @@ You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwor :::note Progress Checklist - + ::: @@ -555,8 +574,9 @@ If you encounter issues during the setup process, here are some common troublesh If your bootstrap resource provisioning fails: - - + + + @@ -565,13 +585,17 @@ If your bootstrap resource provisioning fails: If your HCL configurations aren't working as expected: - - + + ### GitHub Actions Workflow Issues -If your GitHub Actions workflow isn't triggering correctly: +If your GitHub Actions workflow isn't working as expected: + + + + diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 982f67f844..114fa366e5 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -304,13 +304,19 @@ Once you've set up the Terragrunt configurations, you can use Terragrunt to prov First, make sure that everything is set up correctly by running a plan in the bootstrap directory. ```bash title="name-of-account/_global/bootstrap" -terragrunt run --all --non-interactive plan +terragrunt run --all --non-interactive --provider-cache plan ``` +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + Next, apply the changes to your account. 
```bash title="name-of-account/_global/bootstrap" -terragrunt run --all --non-interactive apply +terragrunt run --all --non-interactive --provider-cache apply ``` :::note Progress Checklist From bed6d3058c57f69b48e43dc148beae59fe419371 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:28:51 -0400 Subject: [PATCH 23/39] docs: Adjusting language in `Setup & Installation` --- docs/2.0/docs/pipelines/installation/overview.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/overview.md b/docs/2.0/docs/pipelines/installation/overview.md index 917751ccab..68f0441794 100644 --- a/docs/2.0/docs/pipelines/installation/overview.md +++ b/docs/2.0/docs/pipelines/installation/overview.md @@ -12,9 +12,9 @@ Customers using Account Factory benefit from a guided setup process that include Completing these steps results in a repository fully configured for automated infrastructure deployments using GitOps workflows. -## Installation via manual setup +## Standalone Installation -For users not leveraging Account Factory or needing Gruntwork Pipelines for a standalone repository with existing Terragrunt configurations, Gruntwork Pipelines can be installed as an independent GitHub Actions or GitLab pipelines workflow. +For users not leveraging Account Factory or needing Gruntwork Pipelines for a standalone repository with existing Terragrunt configurations, Gruntwork Pipelines can be installed as an independent GitHub Actions Workflow or GitLab CI Pipeline. To learn more about this process, consult the documentation for [Adding Pipelines to a New Repository](/2.0/docs/pipelines/installation/addingnewrepo) or [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo). From 4a4b09fe7c367dc491b285b50035238373add8ca Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:52:24 -0400 Subject: [PATCH 24/39] docs: Adjusting logic for repo setup --- .../pipelines/installation/authoverview.md | 15 ++++--- ...ch-protection.md => branch-protection.mdx} | 45 +++++-------------- .../installation/gitlab-branch-protection.md | 27 +++++++++++ .../docs/pipelines/installation/overview.md | 2 +- .../pipelines/installation/viagithubapp.md | 6 +-- ...viamachineusers.md => viamachineusers.mdx} | 4 +- sidebars/docs.js | 13 ++++-- 7 files changed, 61 insertions(+), 51 deletions(-) rename docs/2.0/docs/pipelines/installation/{branch-protection.md => branch-protection.mdx} (58%) create mode 100644 docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md rename docs/2.0/docs/pipelines/installation/{viamachineusers.md => viamachineusers.mdx} (99%) diff --git a/docs/2.0/docs/pipelines/installation/authoverview.md b/docs/2.0/docs/pipelines/installation/authoverview.md index 74e34b0c7c..18706bca67 100644 --- a/docs/2.0/docs/pipelines/installation/authoverview.md +++ b/docs/2.0/docs/pipelines/installation/authoverview.md @@ -1,17 +1,20 @@ -# Authenticating Gruntwork Pipelines +# SCM Authentication Overview -Gruntwork Pipelines requires authentication with GitHub/GitLab to perform various functions, including: +Gruntwork Pipelines requires authentication with Source Control Management (SCM) platforms (e.g. GitHub, GitLab) for various reasons, including: -- Downloading Gruntwork code, such as the Pipelines binary and Terraform modules, from the `gruntwork-io` GitHub organization. 
+- Downloading Gruntwork software, such as the Pipelines binary and OpenTofu modules, from the `gruntwork-io` GitHub organization. - Interacting with your repositories, such as: - Creating pull requests. - Commenting on pull requests. - Creating new repositories via Account Factory. - - Updating repository settings, such as enforcing branch protection, via Account Factory. + - Updating repository settings, such as enforcing branch protection with Account Factory. -Gruntwork provides two authentication methods: a [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md) and CI Users ([Machine Users](/2.0/docs/pipelines/installation/viamachineusers.md)) with personal access tokens for Pipelines. +Gruntwork provides two authentication methods: -Both approaches support the core functionality of Pipelines. However, the GitHub App provides additional features and benefits, making it the recommended method. While Gruntwork strives to ensure feature parity between the two authentication mechanisms, certain advanced features are exclusive to the GitHub App, and this list is expected to grow over time. +- [The Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md) +- [CI Users (Machine Users)](/2.0/docs/pipelines/installation/viamachineusers.md) + +Both approaches support the core functionality of Pipelines. The GitHub App provides additional features and benefits, making it the recommended method for most customers that can use it. While Gruntwork strives to ensure feature parity between the two authentication mechanisms, certain advanced features are exclusive to the GitHub App, and this list is expected to grow over time. ## Summary of authentication mechanisms for GitHub diff --git a/docs/2.0/docs/pipelines/installation/branch-protection.md b/docs/2.0/docs/pipelines/installation/branch-protection.mdx similarity index 58% rename from docs/2.0/docs/pipelines/installation/branch-protection.md rename to docs/2.0/docs/pipelines/installation/branch-protection.mdx index c9dcda359e..399ca57d43 100644 --- a/docs/2.0/docs/pipelines/installation/branch-protection.md +++ b/docs/2.0/docs/pipelines/installation/branch-protection.mdx @@ -1,14 +1,8 @@ -# Branch Protection +# Adding Branch Protection to a GitHub Repository -Gruntwork Pipelines is designed to function within a PR-based workflow. Approving a pull request (PR) or merge request (MR) signals approval to deploy infrastructure, so it's important to configure repository settings and branch protection accurately. +Gruntwork Pipelines is designed to function within a pull request (PR) based workflow. Approving a pull request signals approval to deploy infrastructure, so it's important to configure repository settings and branch protection accurately. -## Recommended settings - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - +## GitHub Recommended Settings By default, Gruntwork Pipelines runs a `plan` on every push to a PR and an `apply` on every push to `main`. To ensure that infrastructure changes are reviewed and approved before deployment, branch protection should be enabled on `main` to prevent unauthorized changes. @@ -40,32 +34,13 @@ Below is an example of the recommended branch protection settings: GitHub Enterprise customers can also configure [push rulesets](https://docs.github.com/en/enterprise-cloud@latest/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/about-rulesets#push-rulesets). 
This feature allows restricting edits to `.github/workflows` files, ensuring infrastructure changes are properly reviewed and approved through Pipelines. Follow the documentation [here](https://docs.github.com/en/enterprise-cloud@latest/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/creating-rulesets-for-a-repository#creating-a-push-ruleset) to enable push rulesets if available. ::: - - - -## GitLab Recommended Settings - -For GitLab repositories, similar protection rules should be configured on the default branch (typically `main`). Navigate to `Settings > Repository > Protected branches` to configure the following settings: - -- Set the initial default branch to **Protected**. -- Set **Allowed to merge** to "Developers" or a specific group to control who can merge changes. -- Set **Allowed to push** to "No one" to prevent direct pushes to the protected branch. -- (Optional) Enable **Require approval from code owners** to ensure designated reviewers approve changes to specific files. - -Below is an example of the recommended GitLab branch protection settings: - -![GitLab Branch Protection Settings](/img/pipelines/gitlab_branch_protection.png) - - - - -## Merge Request/Pull Request Workflow +## Pull Request Workflow -1. Developers make infrastructure changes on a branch and create a PR (GitHub) or MR (GitLab) against the default branch. -2. On merge request/pull request creation, Gruntwork Pipelines runs `plan` for any changes and posts the results as a comment. +1. Developers make infrastructure changes on a branch and create a pull request (PR) against the default branch. +2. On pull request creation, Gruntwork Pipelines runs `plan` for any changes and posts the results as a comment. 3. Gruntwork Pipelines re-runs `plan` on every push to the branch and updates the results in a comment. 4. Gather approvals. If Code Owners is enabled, all relevant code owners must approve the changes. -5. Once approved, merge the merge request/pull request into the default branch. -6. Gruntwork Pipelines runs `apply` for the changes from the merge request/pull request. - - On success, the merge request/pull request is updated to indicate the successful `apply`. - - On failure, the merge request/pull request is updated to indicate the failure of the `apply`. If the failure cannot be resolved by retrying, a new merge request/pull request must be created to address the issues. +5. Once approved, merge the pull request into the default branch. +6. Gruntwork Pipelines runs `apply` for the changes from the pull request. + - On success, the pull request is updated to indicate the successful `apply`. + - On failure, the pull request is updated to indicate the failure of the `apply`. If the failure cannot be resolved by retrying, a new pull request must be created to address the issues. diff --git a/docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md b/docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md new file mode 100644 index 0000000000..51d936034e --- /dev/null +++ b/docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md @@ -0,0 +1,27 @@ +# Adding Branch Protection to a GitLab Project + +Gruntwork Pipelines is designed to function within a merge request (MR) based workflow. Approving a merge request signals approval to deploy infrastructure, so it's important to configure repository settings and branch protection accurately. 
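+
+If you prefer to manage these settings as code, a minimal sketch along the following lines can encode the protections recommended below. This assumes the `gitlabhq/gitlab` Terraform provider and placeholder values for the token and project path; adapt it to however you already manage GitLab itself.
+
+```hcl
+terraform {
+  required_providers {
+    gitlab = {
+      source = "gitlabhq/gitlab"
+    }
+  }
+}
+
+variable "gitlab_token" {
+  type      = string
+  sensitive = true
+}
+
+provider "gitlab" {
+  token = var.gitlab_token
+}
+
+resource "gitlab_branch_protection" "main" {
+  # Placeholder project path; replace with your group/project.
+  project = "acme/infrastructure-live"
+  branch  = "main"
+
+  # Prevent direct pushes to the protected branch.
+  push_access_level = "no one"
+
+  # Control who can merge changes.
+  merge_access_level = "developer"
+
+  # Optional: require code owner approval (GitLab Premium).
+  code_owner_approval_required = true
+}
+```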
+ +## GitLab Recommended Settings + +For GitLab repositories, similar protection rules should be configured on the default branch (typically `main`). Navigate to `Settings > Repository > Protected branches` to configure the following settings: + +- Set the initial default branch to **Protected**. +- Set **Allowed to merge** to "Developers" or a specific group to control who can merge changes. +- Set **Allowed to push** to "No one" to prevent direct pushes to the protected branch. +- (Optional) Enable **Require approval from code owners** to ensure designated reviewers approve changes to specific files. + +Below is an example of the recommended GitLab branch protection settings: + +![GitLab Branch Protection Settings](/img/pipelines/gitlab_branch_protection.png) + +## Merge Request Workflow + +1. Developers make infrastructure changes on a branch and create a merge request (MR) against the default branch. +2. On merge request creation, Gruntwork Pipelines runs `plan` for any changes and posts the results as a comment. +3. Gruntwork Pipelines re-runs `plan` on every push to the branch and updates the results in a comment. +4. Gather approvals. If Code Owners is enabled, all relevant code owners must approve the changes. +5. Once approved, merge the merge request into the default branch. +6. Gruntwork Pipelines runs `apply` for the changes from the merge request. + - On success, the merge request is updated to indicate the successful `apply`. + - On failure, the merge request is updated to indicate the failure of the `apply`. If the failure cannot be resolved by retrying, a new merge request must be created to address the issues. diff --git a/docs/2.0/docs/pipelines/installation/overview.md b/docs/2.0/docs/pipelines/installation/overview.md index 68f0441794..aa37ab3978 100644 --- a/docs/2.0/docs/pipelines/installation/overview.md +++ b/docs/2.0/docs/pipelines/installation/overview.md @@ -34,4 +34,4 @@ For GitHub Actions, you have two authentication options: For GitLab CI/CD: 1. [Machine User Authentication](/2.0/docs/pipelines/installation/viamachineusers) is the only supported method -2. Contact Gruntwork support to authorize your GitLab groups +2. Contact [Gruntwork support](/support) to authorize your GitLab groups diff --git a/docs/2.0/docs/pipelines/installation/viagithubapp.md b/docs/2.0/docs/pipelines/installation/viagithubapp.md index ebec5153a2..a3e87289f6 100644 --- a/docs/2.0/docs/pipelines/installation/viagithubapp.md +++ b/docs/2.0/docs/pipelines/installation/viagithubapp.md @@ -4,7 +4,7 @@ toc_min_heading_level: 2 toc_max_heading_level: 3 --- -# Pipelines Install via GitHub App +# Installing the Gruntwork.io GitHub App The [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) is a [GitHub App](https://docs.github.com/en/apps/overview) introduced to help reduce the burden of integrating Gruntwork products to GitHub resources. The app is designed to be lightweight and flexible, providing a simple way to get started with Gruntwork products. @@ -66,7 +66,7 @@ As of 2024/09/10, the Gruntwork.io GitHub App requests the following permissions These permissions are used during the initial bootstrapping process when customers opt-in to additional repositories being created outside of the main `infrastructure-live-root` repository. 
- This is especially important for DevOps Foundations Enterprise customers, as those customers benefit from the ability to have `infrastructure-live-root` repositories create new repositories and add designated GitHub teams as collaborators via Infrastructure as Code (IaC). This is a critical feature for Enterprise customers who want to be able to scale their infrastructure management across multiple teams with delegated responsibility for segments of their IaC Estate. + This is especially important for Account Factory Enterprise customers, as those customers benefit from the ability to have `infrastructure-live-root` repositories create new repositories and add designated GitHub teams as collaborators via Infrastructure as Code (IaC). This is a critical feature for Enterprise customers who want to be able to scale their infrastructure management across multiple teams with delegated responsibility for segments of their IaC Estate.

Write access to Contents

@@ -166,7 +166,7 @@ To install the Gruntwork.io GitHub App in your organization follow these steps. ### Infrastructure Live Root Repositories -DevOps Foundations treats certain repositories as especially privileged in order to perform critical operations like vending new AWS accounts and creating new repositories. These repositories are called "infrastructure live root repositories" and you can configure them in the [GitHub Account section](https://app.gruntwork.io/account?scroll_to=github-app) for your organization in the Gruntwork developer portal **if you are a designated administrator**. +Account Factory treats certain repositories as especially privileged in order to perform critical operations like vending new AWS accounts and creating new repositories. These repositories are called "infrastructure live root repositories" and you can configure them in the [GitHub Account section](https://app.gruntwork.io/account?scroll_to=github-app) for your organization in the Gruntwork developer portal **if you are a designated administrator**. ![Root Repository Configuration](/img/devops-foundations/github-app/root-repo-config.png) diff --git a/docs/2.0/docs/pipelines/installation/viamachineusers.md b/docs/2.0/docs/pipelines/installation/viamachineusers.mdx similarity index 99% rename from docs/2.0/docs/pipelines/installation/viamachineusers.md rename to docs/2.0/docs/pipelines/installation/viamachineusers.mdx index 7d0d648c85..45d6ead0a9 100644 --- a/docs/2.0/docs/pipelines/installation/viamachineusers.md +++ b/docs/2.0/docs/pipelines/installation/viamachineusers.mdx @@ -4,7 +4,7 @@ toc_min_heading_level: 2 toc_max_heading_level: 4 --- -# Setting up Pipelines via GitHub Machine Users +# Creating Machine Users import PersistentCheckbox from '/src/components/PersistentCheckbox'; import Tabs from "@theme/Tabs" @@ -251,7 +251,7 @@ Required to update GitHub organization team members. When vending delegated repo :::tip -If you are not an Enterprise customer, you should delete it after DevOps Foundations setup. +If you are not an Enterprise customer, you should delete it after Account Factory onboarding. 
::: ### ci-read-only-user diff --git a/sidebars/docs.js b/sidebars/docs.js index 30c911d33f..6f3df20be4 100644 --- a/sidebars/docs.js +++ b/sidebars/docs.js @@ -226,21 +226,21 @@ const sidebar = [ }, { type: "category", - label: "Enable Auth for Pipelines", + label: "Set up SCM Authentication", collapsed: false, items: [ { - label: "Auth Overview", + label: "Overview", type: "doc", id: "2.0/docs/pipelines/installation/authoverview", }, { - label: "Auth via GitHub App", + label: "GitHub App", type: "doc", id: "2.0/docs/pipelines/installation/viagithubapp", }, { - label: "Auth via Machine Users", + label: "Machine Users", type: "doc", id: "2.0/docs/pipelines/installation/viamachineusers", }, @@ -288,6 +288,11 @@ const sidebar = [ type: "doc", id: "2.0/docs/pipelines/installation/addinggitlabrepo", }, + { + label: "Adding Branch Protection to a GitLab Project", + type: "doc", + id: "2.0/docs/pipelines/installation/gitlab-branch-protection", + }, ], }, ], From 8579981ee94864e0a4141f5fc3ff18e944c105b4 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Fri, 3 Oct 2025 18:27:52 -0400 Subject: [PATCH 25/39] fix: Cutting down on steps for adding a new repo --- .../installation/addinggitlabrepo.md | 882 +++++++++++++++--- .../pipelines/installation/addingnewrepo.mdx | 701 +++++--------- sidebars/docs.js | 7 +- 3 files changed, 977 insertions(+), 613 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md index 142a0b577a..6085165183 100644 --- a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md +++ b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md @@ -1,192 +1,838 @@ -import CustomizableValue from '/src/components/CustomizableValue'; +# Bootstrap Pipelines in a New GitLab Repository -# Adding Pipelines to an existing GitLab Project +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import PersistentCheckbox from '/src/components/PersistentCheckbox'; -This guide walks you through the process of adding Gruntwork Pipelines to a GitLab project. By the end, you'll have a fully configured GitLab CI/CD pipeline that can deploy infrastructure changes automatically. +To configure Gruntwork Pipelines in a new GitLab repository, complete the following steps (which are explained in detail below): -## Prerequisites +1. Create an `infrastructure-live` repository. +2. Configure machine user tokens for GitLab access, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. +3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. +4. Create `.gitlab-ci.yml` to tell your GitLab CI/CD pipeline how to run your pipelines. +5. Commit and push your changes to your repository. -Before you begin, make sure you have: +## Creating the infrastructure-live repository -- Basic familiarity with Git, GitLab, and infrastructure as code concepts -- Access to one (or many) AWS account(s) where you have permission to create IAM roles and OIDC providers -- Completed the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab) and setup a machine user with appropriate PAT tokens -- Local access to Gruntwork's GitHub repositories, specifically the [architecture catalog](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/) +Creating an `infrastructure-live` repository is fairly straightforward. 
First, create a new repository using the official GitLab documentation for [creating repositories](https://docs.gitlab.com/user/project/repository/). Name the repository something like `infrastructure-live` and make it private (or internal). -:::info +Clone the repository to your local machine using [Git](https://docs.gitlab.com/user/project/repository/index.html#clone-a-repository). -**For custom GitLab instances only**: You must [fork](https://docs.gitlab.com/user/project/repository/forking_workflow/#create-a-fork) Gruntwork's public [Pipelines workflow project](https://gitlab.com/gruntwork-io/pipelines-workflows) into your own GitLab instance. +:::tip -This is necessary because Gruntwork Pipelines uses [GitLab CI/CD components](/2.0/docs/pipelines/architecture/ci-workflows), and GitLab requires components to reside within the [same GitLab instance as the project referencing them](https://docs.gitlab.com/ci/components/#use-a-component). +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). -When creating the fork, we recommend configuring it as a public mirror of the original Gruntwork project and ensuring that tags are included. ::: -## Setup Process Overview +For example: -Setting up Gruntwork Pipelines for GitLab involves these main steps: +```bash +git clone git@gitlab.com:acme/infrastructure-live.git +``` -(prerequisite) Complete the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab) +:::note Progress Checklist -1. [Authorize Your GitLab Group with Gruntwork](#step-1-authorize-your-gitlab-group-with-gruntwork) -2. [Install required tools (mise, boilerplate)](#step-2-install-required-tools) -3. [Install Gruntwork Pipelines in Your Repository](#step-3-install-gruntwork-pipelines-in-your-repository) -4. [Install AWS OIDC Provider and IAM Roles for Pipelines](#step-4-install-aws-oidc-provider-and-iam-roles-for-pipelines) -5. [Complete the setup](#step-5-complete-the-setup) + -## Detailed Setup Instructions +::: -### Step 0: Ensure OIDC configuration and JWKS are publicly accessible +Once the repository is cloned locally, you'll want to create a `.mise.toml` file in the root of the repository to tell Pipelines what versions of Terragrunt and OpenTofu to use. -This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step. +For example: -1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. S3 Bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWT's generated by your custom instance. -2. Note the (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps. +```toml title=".mise.toml" +[tools] +terragrunt = "0.88.0" +opentofu = "1.10.6" +``` +:::tip -### Step 1: Authorize Your GitLab Group with Gruntwork +Follow the official [mise installation guide](https://mise.jdx.dev/getting-started.html) to install it locally. -To use Gruntwork Pipelines with GitLab, your group needs authorization from Gruntwork: +You can get `mise` to lookup the versions available for a given tool by using the `ls-remote` command. -1. 
Email your Gruntwork account manager or support@gruntwork.io with: + ```bash +mise ls-remote terragrunt +mise ls-remote opentofu +``` - ``` - GitLab group name(s): $$GITLAB_GROUP_NAME$$ (e.g. acme-io) - GitLab Issuer URL: $$ISSUER_URL$$ (For most users this is the URL of your GitLab instance e.g. https://gitlab.acme.io. If your instance is not publicly accessible, this should be a separate URL that is publicly accessible per step 0, e.g. https://s3.amazonaws.com/YOUR_BUCKET_NAME/) - Organization name: $$ORGANIZATION_NAME$$ (e.g. Acme, Inc.) - ``` +::: -2. Wait for confirmation that your group has been authorized. +Next, install Terragrunt and OpenTofu locally: -### Step 2: Install Required Tools + ```bash +mise install +``` -First, you'll need to install [mise](https://mise.jdx.dev/), a powerful environment manager that will help set up the required tools: +:::note Progress Checklist -1. Install mise by following the [getting started guide](https://mise.jdx.dev/getting-started.html) + + +::: -2. Activate mise in your shell: - ```bash - # For Bash - echo 'eval "$(~/.local/bin/mise activate bash)"' >> ~/.bashrc +## Configuring SCM Access - # For Zsh - echo 'eval "$(~/.local/bin/mise activate zsh)"' >> ~/.zshrc +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). - # For Fish - echo 'mise activate fish | source' >> ~/.config/fish/config.fish - ``` +For GitLab, you'll need to configure SCM access using [machine users](/2.0/docs/pipelines/installation/viamachineusers.md#gitlab) with appropriate Personal Access Tokens (PATs). -3. Install the boilerplate tool, which will generate the project structure: - ```bash - # For mise version BEFORE 2025.2.10 - mise plugin add boilerplate https://github.com/gruntwork-io/asdf-boilerplate.git +:::note Progress Checklist - # For mise version 2025.2.10+ - mise plugin add boilerplate + - mise use boilerplate@0.6.0 - ``` +::: -4. Verify the installation: - ```bash - boilerplate --version +## Creating Cloud Resources for Pipelines - # If that doesn't work, try: - mise x -- boilerplate --version +To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. - # If that still doesn't work, check where boilerplate is installed: - mise which boilerplate - ``` +:::note + +If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. + +::: + +This guide will assume a blank slate, so you can start by creating a new Git repository to track the infrastructure that you're provisioning here. + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: + + + + +The resources that you need provisioned in AWS to start managing resources with Pipelines are: + +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands + +For every account you want Pipelines to manage infrastructure in. + +:::tip Don't Panic! 
+ +This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository +2. Use Terragrunt to provision these resources in your AWS account + +### Bootstrap your `infrastructure-live` repository + +To bootstrap your AWS account for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: + +```hcl title="root.hcl" +locals { + account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) + state_bucket_name = local.account_hcl.locals.state_bucket_name + + region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) + aws_region = local.region_hcl.locals.aws_region +} + +remote_state { + backend = "s3" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + bucket = local.state_bucket_name + region = local.aws_region + key = "${path_relative_to_include()}/tofu.tfstate" + encrypt = true + use_lockfile = true + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: -### Step 3: Install Gruntwork Pipelines in Your Repository +This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers and remote state configuration. -1. Identify where you want to install Gruntwork Pipelines, for example create a new project/repository in your GitLab group (or use an existing one) named +```hcl title="name-of-account/account.hcl" +locals { + // This is the name of the S3 bucket that will be created for state storage. + // + // Make sure this is globally unique across all AWS accounts, as S3 bucket names must be globally unique. + // You will need to change this. + state_bucket_name = "your-unique-bucket-name-for-state" +} +``` + +:::note Progress Checklist + + + + + +::: + +This file is used by all units in the `name-of-account` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your account. + +```hcl title="name-of-account/_global/region.hcl" +locals { + aws_region = "us-east-1" +} +``` + +:::tip + +This region configuration is being set because the AWS API needs to make API calls to _some_ AWS region, but all the resources are, in fact, global. + +The AWS IAM service is a global service, which is why we're storing the bootstrap resources in the `_global` directory. + +::: + +:::note Progress Checklist + + + + + +::: + +This file is used by all units in the `_global` directory to ensure that Terragrunt configurations know which AWS region to use for the OpenTofu AWS provider configuration. + +```hcl title="name-of-account/_global/bootstrap/terragrunt.stack.hcl" +locals { + // Read from parent configurations instead of defining these values locally + // so that other stacks and units in this directory can reuse the same configurations. 
+ account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) +} + +stack "bootstrap" { + source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/gitlab/pipelines-bootstrap?ref=v1.0.0" + path = "bootstrap" + + values = { + // Set the OIDC resource prefix you want to use for your account. + // + // This will be used to determine the names of the OIDC resources like the IAM roles that are created. + // e.g. `pipelines-plan`, `pipelines-apply`, etc. + oidc_resource_prefix = "pipelines" + + // Set the GitLab group name you want AWS to trust for OIDC. + gitlab_group_name = "acme" + + // Set the repository name you want AWS to trust for OIDC. + gitlab_repo_name = "infrastructure-live" + + // Set the GitLab instance URL (use https://gitlab.com for GitLab.com) + gitlab_instance_url = "https://gitlab.com" + + // Read from parent configurations instead of defining these values locally. + state_bucket_name = local.account_hcl.locals.state_bucket_name + } +} +``` + +:::note Progress Checklist + + + + + + + + +::: + +You'll also want to make sure that you add the `aws` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with AWS for the bootstrapping process. + +```toml title=".mise.toml" +[tools] +# The Terragrunt and OpenTofu entries should already be present... +awscli = "2.31.6" +``` + +:::tip + +Remember that you can use `ls-remote` to list the available versions of the `awscli` tool. -2. Clone the repository to your local machine if it's not already cloned: ```bash - git clone git@gitlab.com:$$GITLAB_GROUP_NAME$$/$$REPOSITORY_NAME$$.git - cd $$REPOSITORY_NAME$$ - ``` -3. Create a new branch for your changes: +mise ls-remote awscli +``` + +::: + +Make sure to run `mise install` to install the `awscli` tool. + ```bash - git checkout -b gruntwork-pipelines +mise install ``` -4. Download the sample [vars.yaml file](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/blob/main/examples/gitlab-pipelines/vars.yaml) to the root of +If you haven't already, you'll want to authenticate to AWS using the `aws` CLI. + + ```bash +aws configure +``` + +:::note Progress Checklist + + + + +::: + +### Provisioning the resources + +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. + +First, make sure that everything is set up correctly by running a plan in the bootstrap directory. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +Next, apply the changes to your account. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache apply +``` + +:::note Progress Checklist + + + +::: + +:::tip Troubleshooting Tips + +If you encounter issues during this step, please refer to the [AWS Initial Apply Failure](#aws-initial-apply-failure) section. + +::: + + + + +The resources that you need provisioned in Azure to start managing resources with Pipelines are: + +1. An Azure Resource Group for OpenTofu state resources + 1. An Azure Storage Account in that resource group for OpenTofu state storage + 1. An Azure Storage Container in that storage account for OpenTofu state storage +2. 
An Entra ID Application to use for plans + 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + 2. A role assignment for the service principal to access the Azure Storage Account +3. An Entra ID Application to use for applies + 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository +2. Use Terragrunt to provision these resources in your Azure subscription +3. Pull the bootstrap resources into state, using the storage account we just provisioned + +### Bootstrap your Azure `infrastructure-live` repository + +To bootstrap your Azure subscription for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: + +```hcl title="root.hcl" +generate "provider" { + path = "provider.tf" + if_exists = "overwrite" + contents = < + +::: + +This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers. + +```hcl title="name-of-subscription/sub.hcl" +locals { + // This is the name of the resource group that will be created for state storage. + // + // You don't need to change this if you don't want to (and you don't already have a resource group named this). + state_resource_group_name = "pipelines-rg" + + // Make sure this is less than 24 characters, and only contains lowercase letters and numbers + // to obey Azure's naming requirements. + // + // You will need to change this. + state_storage_account_name = "name-of-storage-account-you-want-to-use-for-state" + + // This is the name of the container you'll use for state storage. + // + // You don't need to change this if you don't want to. + state_storage_container_name = "tfstate" +} +``` + +:::note Progress Checklist + + + + + + +::: + +This file is used by all units in the `name-of-subscription` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your subscription. + +```hcl title="name-of-subscription/bootstrap/terragrunt.stack.hcl" +locals { + // Read from parent configurations instead of defining these values locally + // so that other stacks and units in this directory can reuse the same configurations. 
+ sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) +} + +stack "bootstrap" { + source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/gitlab/pipelines-bootstrap?ref=v1.0.0" + path = "bootstrap" + + values = { + // Set the location to the location you want to bootstrap your subscription in. + location = "East US" + + // Read from parent configurations instead of defining these values locally. + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name + + // Set the GitLab group name you want Azure to trust for OIDC. + gitlab_group_name = "acme" + + // Set the repository name you want Azure to trust for OIDC. + gitlab_repo_name = "infrastructure-live" + + // Set the GitLab instance URL (use https://gitlab.com for GitLab.com) + gitlab_instance_url = "https://gitlab.com" + + // Set the OIDC resource prefix you want to use for your subscription. + // + // This will be used to determine the names of the OIDC resources like the Entra ID Applications that are created. + // e.g. `pipelines`-plan, `pipelines`-apply, etc. + oidc_resource_prefix = "pipelines" + } +} +``` + +:::note Progress Checklist + + + + + + + -4. Edit the `vars.yaml` file to customize it for your environment. If using a custom GitLab instance, update any custom instance variables. +::: + +You'll also want to make sure that you add the `azure` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with Azure for the bootstrapping process. + +```toml title=".mise.toml" +[tools] +# The Terragrunt and OpenTofu entries should already be present... +azure-cli = "2.77.0" +``` + +:::tip + +Remember that you can use `ls-remote` to list the available versions of the `azure-cli` tool. -5. `cd` to the root of where you wish to install Gruntwork Pipelines. Run the boilerplate tool to generate your repository structure: ```bash - boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/gitlab-pipelines-infrastructure-live-root/?ref=v3.1.0" --output-folder . --var-file vars.yaml --non-interactive +mise ls-remote azure-cli ``` - If you encounter SSH issues, verify your SSH access to GitHub: +::: + +Make sure to run `mise install` to install the `azure-cli` tool. + ```bash - ssh -T git@github.com - # or try cloning manually - git clone git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git +mise install ``` -6. Commit the changes: +If you haven't already, you'll want to authenticate to Azure using the `az` CLI. + ```bash - git add . - git commit -m "[skip ci] Add Gruntwork Pipelines" - git push origin gruntwork-pipelines - ``` +az login +``` + +:::note Progress Checklist + + + + +::: -7. Create a merge request in GitLab and review the changes. +### Provisioning the Azure resources -### Step 4: Install AWS OIDC Provider and IAM Roles for Pipelines +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription. -1. Navigate to the `_global` folder under each account in your repository and review the Terragrunt files that were created: - - The GitLab OIDC identity provider in AWS. 
+To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: - :::note - If using a custom GitLab instance, ensure the `URL` and `audiences` inputs in this configuration are correct. - ::: +- `ARM_TENANT_ID` +- `ARM_SUBSCRIPTION_ID` - - IAM roles for your the account (`root-pipelines-plan` and `root-pipelines-apply`) +For example: -2. Apply these configurations to create the required AWS resources: ```bash - cd $$ACCOUNT_NAME$$/_global/ - terragrunt run-all plan - terragrunt run-all apply - ``` +export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" +export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" +``` + +:::note Progress Checklist + + + +::: + +First, make sure that everything is set up correctly by running a plan in the subscription directory. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process. + +::: + +Next, apply the changes to your subscription. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply +``` + +:::tip + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + +::: + +:::note Progress Checklist + + + +::: + +:::tip Troubleshooting Tips + +If you encounter issues during this step, please refer to the [Initial Apply Failure](#azure-initial-apply-failure) section. + +::: + +### Pulling the resources into state + +Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +``` + +:::tip + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + +::: + +:::note Progress Checklist + + + +::: + + + + +## Creating `.gruntwork` HCL configurations + +Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). + +### The `repository` block + +The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. + +```hcl title=".gruntwork/repository.hcl" +repository { + deploy_branch_name = "main" +} +``` + +:::note Progress Checklist + + + + +::: + +### The `environment` block + +Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). 
+ +For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. + + + + +```hcl title=".gruntwork/environment-an-aws-account.hcl" +environment "an_aws_account" { + filter { + paths = ["an-aws-account/*"] + } + + authentication { + aws_oidc { + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. + +::: + +:::tip + +Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. + +::: + +:::note Progress Checklist + + + + + + + + +::: + + + + +```hcl title=".gruntwork/environment-an-azure-subscription.hcl" +environment "an_azure_subscription" { + filter { + paths = ["an-azure-subscription/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "11111111-1111-1111-1111-111111111111" + + plan_client_id = "33333333-3333-3333-3333-333333333333" + apply_client_id = "44444444-4444-4444-4444-444444444444" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. + +::: + +:::note Progress Checklist + + + + + + + + + + +::: + + + + +```hcl title=".gruntwork/environment-dev.hcl" +environment "dev" { + filter { + paths = ["dev/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-dev.sh" + } + } +} +``` + +:::tip + +Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. + +::: + +:::note Progress Checklist + + + + + + + + +::: + + + + +## Creating `.gitlab-ci.yml` + +Create a `.gitlab-ci.yml` file in the root of your `infrastructure-live` repository with the following content: + +```yaml title=".gitlab-ci.yml" +include: + - component: gitlab.com/gruntwork-io/pipelines-workflows/gitlab-ci@v4 + inputs: + stage: pipelines +``` + +:::info + +**For custom GitLab instances only**: If you are using a custom GitLab instance, you must update the component reference to point to your forked version of the pipelines-workflows project: + +```yaml title=".gitlab-ci.yml" +include: + - component: your-gitlab-instance.com/your-group/pipelines-workflows/gitlab-ci@v4 + inputs: + stage: pipelines +``` + +::: + +:::tip + +You can read the [Pipelines GitLab CI Component](https://gitlab.com/gruntwork-io/pipelines-workflows/-/blob/main/templates/gitlab-ci.yml) to learn how this GitLab CI component calls the Pipelines CLI to run your pipelines. + +::: + +:::note Progress Checklist + + + + +::: + +## Commit and push your changes + +Commit and push your changes to your repository. 
:::note - In the event you already have an OIDC provider for your SCM in the AWS account you can import the existing one: +You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow. - ``` - cd _global/$$ACCOUNT_NAME$$/gitlab-pipelines-openid-connect-provider/ - terragrunt import "aws_iam_openid_connect_provider.gitlab" "ARN_OF_EXISTING_OIDC_PROVIDER" - ``` +::: + +```bash +git add . +git commit -m "Add Pipelines GitLab CI workflow [skip ci]" +git push +``` + +:::note Progress Checklist + + + + +::: + +🚀 You've successfully added Gruntwork Pipelines to your new repository! + +## Next steps + +You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. +## Troubleshooting Tips - ::: +If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario. -### Step 5: Complete the Setup +### AWS Initial Apply Failure -1. Return to GitLab and merge the merge request with your changes. -2. Ensure that `PIPELINES_GITLAB_TOKEN` and `PIPELINES_GITLAB_READ_TOKEN` are set as a CI/CD variables in your group or project if you haven't already (see the [Machine Users setup guide](/2.0/docs/pipelines/installation/viamachineusers#gitlab) for details). -3. Test your setup by creating a new branch with some sample infrastructure code and creating a merge request. +If your initial apply fails, follow these steps to troubleshoot the issue: -## Next Steps + + + + + + -After setting up Pipelines, you can: +### Azure Initial Apply Failure -- [Deploy your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) -- [Learn how to run plan and apply operations](/2.0/docs/pipelines/guides/running-plan-apply) -- [Extend Pipelines with custom actions](/2.0/docs/pipelines/guides/extending-pipelines) +If your initial apply fails, follow these steps to troubleshoot the issue: -## Troubleshooting + + + + + + + -If you encounter issues during setup: +### GitLab CI/CD Issues -- Ensure your GitLab CI user has the correct permissions to your group and projects -- Verify that both `PIPELINES_GITLAB_TOKEN` and `PIPELINES_GITLAB_READ_TOKEN` are set correctly as CI/CD variables and are *NOT* marked as protected -- Confirm your GitLab group has been authorized by Gruntwork for Pipelines usage +If you encounter issues with GitLab CI/CD: -For further assistance, contact [support@gruntwork.io](mailto:support@gruntwork.io). + + + + diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 114fa366e5..4833bdc91a 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -16,92 +16,80 @@ To configure Gruntwork Pipelines in a new GitHub repository, complete the follow Creating an `infrastructure-live` repository is fairly straightforward. First, create a new repository using the official GitHub documentation for [creating repositories](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-new-repository). Name the repository something like `infrastructure-live` and make it private (or internal). -Clone the repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). 
- -:::tip - -If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). +## Configuring SCM Access -::: +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). -For example: +There are two ways to configure SCM access for Pipelines: -```bash -git clone git@github.com:acme/infrastructure-live.git -``` +1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitHub users who cannot use the GitHub App). :::note Progress Checklist - + + ::: -Once the repository is cloned locally, you'll want to create a `.mise.toml` file in the root of the repository to tell Pipelines what versions of Terragrunt and OpenTofu to use. +## Creating Cloud Resources for Pipelines -For example: +To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. -```toml title=".mise.toml" -[tools] -terragrunt = "0.88.0" -opentofu = "1.10.6" -``` +:::note -:::tip +If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. -Follow the official [mise installation guide](https://mise.jdx.dev/getting-started.html) to install it locally. +::: -You can get `mise` to lookup the versions available for a given tool by using the `ls-remote` command. +Clone your `infrastructure-live` repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). -```bash -mise ls-remote terragrunt -mise ls-remote opentofu -``` +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). ::: -Next, install Terragrunt and OpenTofu locally: +For example: ```bash -mise install +git clone git@github.com:acme/infrastructure-live.git +cd infrastructure-live ``` :::note Progress Checklist - - -::: - - + + -## Configuring SCM Access +::: -Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). +To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary IaC code to provision the infrastructure necessary for Pipelines to function. -There are two ways to configure SCM access for Pipelines: +The easiest way to install Boilerplate is to use `mise` to install it. -1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). -2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitHub users who cannot use the GitHub App). - -:::note Progress Checklist +:::tip - +If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html). 
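If you prefer a one-liner, the mise documentation also provides an install script. This is optional and assumes you're comfortable piping the installer into your shell:

```bash
# Install mise via the official install script (see the mise docs for shell activation steps).
curl https://mise.run | sh
```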
::: -## Creating Cloud Resources for Pipelines +```bash +mise use -g boilerplate@latest +``` -To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. +:::tip -:::note +If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions. -If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. +```bash +mise ls-remote boilerplate +``` ::: -This guide will assume a blank slate, so you can start by creating a new Git repository to track the infrastructure that you're provisioning here. - -:::tip +:::note Progress Checklist -If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + ::: @@ -126,190 +114,103 @@ If you want to peruse the catalog that's used in the bootstrap process, you can The process that we'll follow to get these resources ready for Pipelines is: -1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository +1. Set up the Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines 2. Use Terragrunt to provision these resources in your AWS account ### Bootstrap your `infrastructure-live` repository -To bootstrap your AWS account for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: - -```hcl title="root.hcl" -locals { - account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) - state_bucket_name = local.account_hcl.locals.state_bucket_name - - region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) - aws_region = local.region_hcl.locals.aws_region -} - -remote_state { - backend = "s3" - generate = { - path = "backend.tf" - if_exists = "overwrite" - } - config = { - bucket = local.state_bucket_name - region = local.aws_region - key = "${path_relative_to_include()}/tofu.tfstate" - encrypt = true - use_lockfile = true - } -} +To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function. -generate "provider" { - path = "provider.tf" - if_exists = "overwrite_terragrunt" - contents = < +You can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. -::: +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. -This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers and remote state configuration. +e.g. -```hcl title="name-of-account/account.hcl" -locals { - // This is the name of the S3 bucket that will be created for state storage. - // - // Make sure this is globally unique across all AWS accounts, as S3 bucket names must be globally unique. - // You will need to change this. 
- state_bucket_name = "your-unique-bucket-name-for-state" -} +```bash +boilerplate \ + --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitHubOrgName=acme' \ + --var 'GitHubRepoName=infrastructure-live' \ + --var 'AWSAccountID=123456789012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-state-bucket' \ + --non-interactive ``` -:::note Progress Checklist - - - - - -::: - -This file is used by all units in the `name-of-account` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your account. +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. -```hcl title="name-of-account/_global/region.hcl" -locals { - aws_region = "us-east-1" -} +```yaml title="vars.yml" +AccountName: dev +GitHubOrgName: acme +GitHubRepoName: infrastructure-live +AWSAccountID: 123456789012 +AWSRegion: us-east-1 +StateBucketName: my-state-bucket ``` -:::tip - -This region configuration is being set because the AWS API needs to make API calls to _some_ AWS region, but all the resources are, in fact, global. - -The AWS IAM service is a global service, which is why we're storing the bootstrap resources in the `_global` directory. +```bash +boilerplate \ + --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` ::: :::note Progress Checklist - - - - - + ::: -This file is used by all units in the `_global` directory to ensure that Terragrunt configurations know which AWS region to use for the OpenTofu AWS provider configuration. +Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu): -```hcl title="name-of-account/_global/bootstrap/terragrunt.stack.hcl" -locals { - // Read from parent configurations instead of defining these values locally - // so that other stacks and units in this directory can reuse the same configurations. - account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) -} - -stack "bootstrap" { - source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/github/pipelines-bootstrap?ref=v1.0.0" - path = "bootstrap" - - values = { - // Set the OIDC resource prefix you want to use for your account. - // - // This will be used to determine the names of the OIDC resources like the IAM roles that are created. - // e.g. `pipelines-plan`, `pipelines-apply`, etc. - oidc_resource_prefix = "pipelines" - - // Set the organization name you want AWS to trust for OIDC. - github_org_name = "acme" - - // Set the repository name you want AWS to trust for OIDC. - github_repo_name = "infrastructure-live" - - // Read from parent configurations instead of defining these values locally. - state_bucket_name = local.account_hcl.locals.state_bucket_name - } -} +```bash +mise install ``` :::note Progress Checklist - - - - - - + ::: -You'll also want to make sure that you add the `aws` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with AWS for the bootstrapping process. 
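For reference, the scaffolded `.mise.toml` looks something like the sketch below. The tool versions shown are illustrative only; keep whatever versions the template pinned for you.

```toml title=".mise.toml"
[tools]
# Example versions only; use the versions generated by the scaffolding.
terragrunt = "0.88.0"
opentofu   = "1.10.6"
```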
+### Provisioning the resources -```toml title=".mise.toml" -[tools] -# The Terragrunt and OpenTofu entries should already be present... -awscli = "2.31.6" -``` +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. :::tip -Remember that you can use `ls-remote` to list the available versions of the `awscli` tool. +Make sure that you're authenticated with AWS locally before proceeding. -```bash -mise ls-remote awscli -``` +You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role. ::: -Make sure to run `mise install` to install the `awscli` tool. +First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the first AWS account you want to bootstrap. -```bash -mise install +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache plan ``` -If you haven't already, you'll want to authenticate to AWS using the `aws` CLI. - -```bash -aws configure -``` +:::tip -:::note Progress Checklist +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). - - - ::: -### Provisioning the resources - -Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. - -First, make sure that everything is set up correctly by running a plan in the bootstrap directory. - -```bash title="name-of-account/_global/bootstrap" -terragrunt run --all --non-interactive --provider-cache plan -``` - -:::tip +:::note Progress Checklist -We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + ::: @@ -321,13 +222,7 @@ terragrunt run --all --non-interactive --provider-cache apply :::note Progress Checklist - -::: - -:::tip Troubleshooting Tips - -If you encounter issues during this step, please refer to the [AWS Initial Apply Failure](#aws-initial-apply-failure) section. ::: @@ -361,135 +256,81 @@ The process that we'll follow to get these resources ready for Pipelines is: 1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository 2. Use Terragrunt to provision these resources in your Azure subscription -3. Pull the bootstrap resources into state, using the storage account we just provisioned +3. Finalizing Terragrunt configurations using the bootstrap resources we just provisioned +4. Pull the bootstrap resources into state, now that we have configured a remote state backend ### Bootstrap your `infrastructure-live` repository -To bootstrap your Azure subscription for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: +To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function. 
-```hcl title="root.hcl" -generate "provider" { - path = "provider.tf" - if_exists = "overwrite" - contents = < +You can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. -::: +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. -This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers. +e.g. -```hcl title="name-of-subscription/sub.hcl -locals { - // This is the name of the resource group that will be created for state storage. - // - // You don't need to change this if you don't want to (and you don't already have a resource group named this). - state_resource_group_name = "pipelines-rg" - - // Make sure this is less than 24 characters, and only contains lowercase letters and numbers - // to obey Azure's naming requirements. - // - // You will need to change this. - state_storage_account_name = "name-of-storage-account-you-want-to-use-for-state" - - // This is the name of the container you'll use for state storage. - // - // You don't need to change this if you don't want to. - state_storage_container_name = "tfstate" -} +```bash +boilerplate \ + --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitHubOrgName=acme' \ + --var 'GitHubRepoName=infrastructure-live' \ + --var 'SubscriptionName=dev' \ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \ + --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \ + --var 'AzureLocation=East US' \ + --var 'StateResourceGroupName=pipelines-rg' \ + --var 'StateStorageAccountName=my-storage-account' \ + --var 'StateStorageContainerName=tfstate' \ + --non-interactive ``` -:::note Progress Checklist +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + +```yaml title="vars.yml" +AccountName: dev +GitHubOrgName: acme +GitHubRepoName: infrastructure-live +AzureTenantID: 00000000-0000-0000-0000-000000000000 +AzureSubscriptionID: 11111111-1111-1111-1111-111111111111 +AzureLocation: East US +StateResourceGroupName: pipelines-rg +StateStorageAccountName: my-storage-account +StateStorageContainerName: tfstate +``` - - - - +```bash +boilerplate \ + --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` ::: -This file is used by all units in the `name-of-subscription` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your subscription. - -```hcl title="name-of-subscription/bootstrap/terragrunt.stack.hcl" -locals { - // Read from parent configurations instead of defining these values locally - // so that other stacks and units in this directory can reuse the same configurations. 
- sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) -} - -stack "bootstrap" { - source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/github/pipelines-bootstrap?ref=v1.0.0" - path = "bootstrap" - - values = { - // Set the location to the location you want to bootstrap your subscription in. - location = "East US" - - // Read from parent configurations instead of defining these values locally. - state_resource_group_name = local.sub_hcl.locals.state_resource_group_name - state_storage_account_name = local.sub_hcl.locals.state_storage_account_name - state_storage_container_name = local.sub_hcl.locals.state_storage_container_name - - // Set the organization name you want Azure to trust for OIDC. - github_org_name = "acme" - - // Set the repository name you want Azure to trust for OIDC. - github_repo_name = "infrastructure-live" - - // Set the OIDC resource prefix you want to use for your subscription. - // - // This will be used to determine the names of the OIDC resources like the Entra ID Applications that are created. - // e.g. `pipelines`-plan, `pipelines`-apply, etc. - oidc_resource_prefix = "pipelines" - } -} -``` - :::note Progress Checklist - - - - - - - + ::: -You'll also want to make sure that you add the `azure` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with Azure for the bootstrapping process. - -```toml title=".mise.toml" -[tools] -# The Terragrunt and OpenTofu entries should already be present... -azure-cli = "2.77.0" -``` - -:::tip - -Remember that you can use `ls-remote` to list the available versions of the `azure-cli` tool. +Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu): ```bash -mise ls-remote azure-cli +mise install ``` -::: - -Make sure to run `mise install` to install the `azure-cli` tool. +### Provisioning the resources -```bash -mise install -``` +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription. If you haven't already, you'll want to authenticate to Azure using the `az` CLI. @@ -499,14 +340,10 @@ az login :::note Progress Checklist - - -::: -### Provisioning the resources +::: -Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription. To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: @@ -538,244 +375,154 @@ We're using the `--provider-cache` flag here to ensure that we don't re-download ::: -Next, apply the changes to your subscription. - -```bash title="name-of-subscription" -terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply -``` - -:::tip - -We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. - -::: - :::note Progress Checklist - -::: - -:::tip Troubleshooting Tips - -If you encounter issues during this step, please refer to the [Initial Apply Failure](#azure-initial-apply-failure) section. 
::: -### Pulling the resources into state - -Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. +Next, apply the changes to your subscription. ```bash title="name-of-subscription" -terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply ``` :::tip -We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. ::: :::note Progress Checklist - - + ::: -
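:::tip

If you'd like to sanity-check the bootstrap before moving on, you can list the Entra ID applications that were just created. This is an optional spot check, and the application names below assume you kept the default `pipelines` OIDC resource prefix.

```bash
# List the Entra ID applications created for Pipelines (assumes the default "pipelines" prefix).
az ad app list --display-name pipelines-plan --query "[].{name:displayName, appId:appId}" --output table
az ad app list --display-name pipelines-apply --query "[].{name:displayName, appId:appId}" --output table
```

The `appId` values shown here are the client IDs you'll reference later when configuring Pipelines authentication.

:::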
-
- -## Creating `.gruntwork` HCL configurations +### Finalizing Terragrunt configurations -Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). +Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. -### The `repository` block +First, edit the `root.hcl` file in the root of your `infrastructure-live` repository to leverage the storage account we just provisioned. -The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. +```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) -```hcl title=".gruntwork/repository.hcl" -repository { - deploy_branch_name = "main" + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name } -``` - -:::note Progress Checklist - - +# FIXME: Uncomment the code below when you've successfully bootstrapped Pipelines state. +# +# remote_state { +# backend = "azurerm" +# generate = { +# path = "backend.tf" +# if_exists = "overwrite" +# } +# config = { +# resource_group_name = local.state_resource_group_name +# storage_account_name = local.state_storage_account_name +# container_name = local.state_storage_container_name +# key = "${path_relative_to_include()}/tofu.tfstate" +# } +# } -::: - -### The `environment` block - -Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). - -For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. - - - +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < - - - - - + ::: - - +Next, finalize the `.gruntwork/environment-.hcl` file in the root of your `infrastructure-live` repository to reference the IDs for the applications we just provisioned. 
-```hcl title=".gruntwork/environment-an-azure-subscription.hcl" -environment "an_azure_subscription" { +```hcl title=".gruntwork/environment-.hcl" +environment "dev" { filter { - paths = ["an-azure-subscription/*"] + paths = ["dev/*"] } authentication { azure_oidc { - tenant_id = "00000000-0000-0000-0000-000000000000" - subscription_id = "11111111-1111-1111-1111-111111111111" + tenant_id = "11111111-1111-1111-1111-111111111111" + subscription_id = "00000000-0000-0000-0000-000000000000" - plan_client_id = "33333333-3333-3333-3333-333333333333" - apply_client_id = "44444444-4444-4444-4444-444444444444" + plan_client_id = "" # FIXME: Fill in the client ID for the plan application after bootstrapping + apply_client_id = "" # FIXME: Fill in the client ID for the apply application after bootstrapping } } } ``` -:::tip - -Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. - -::: - -:::note Progress Checklist +You can find the values for the `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory in `name-of-subscription/bootstrap`. - - - - - - - - - -::: +```bash +terragrunt stack output +``` - - +The relevant bits that you want to extract from the stack output are the following: -```hcl title=".gruntwork/environment-dev.hcl" -environment "dev" { - filter { - paths = ["dev/*"] +```hcl +bootstrap = { + apply_app = { + client_id = "33333333-3333-3333-3333-333333333333" } - - authentication { - custom { - auth_provider_cmd = "./scripts/custom-auth-dev.sh" - } + plan_app = { + client_id = "44444444-4444-4444-4444-444444444444" } } ``` -:::tip - -Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. - -::: +You can use those values to set the values for `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file. :::note Progress Checklist - - - - - - + + ::: - - +### Pulling the resources into state -## Creating `.github/workflows/pipelines.yml` - -Create a `.github/workflows/pipelines.yml` file in the root of your `infrastructure-live` repository with the following content: - -```yaml title=".github/workflows/pipelines.yml" -name: Pipelines -run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}" -on: - push: - branches: - - main - paths-ignore: - - ".github/**" - pull_request: - types: - - opened - - synchronize - - reopened - paths-ignore: - - ".github/**" - -# Permissions to assume roles and create pull requests -permissions: - id-token: write - contents: write - pull-requests: write - -jobs: - GruntworkPipelines: - uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v4 +Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy ``` :::tip -You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwork-io/pipelines-workflows/blob/main/.github/workflows/pipelines.yml) to learn how this GitHub Actions workflow calls the Pipelines CLI to run your pipelines. +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. 
::: :::note Progress Checklist - - + ::: + + + ## Commit and push your changes Commit and push your changes to your repository. @@ -804,27 +551,3 @@ git push ## Next steps You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. - -## Troubleshooting Tips - -If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario. - -### AWS Initial Apply Failure - -If your initial apply fails, follow these steps to troubleshoot the issue: - - - - - - - -### Azure Initial Apply Failure - -If your initial apply fails, follow these steps to troubleshoot the issue: - - - - - - diff --git a/sidebars/docs.js b/sidebars/docs.js index 6f3df20be4..e90d3dd97f 100644 --- a/sidebars/docs.js +++ b/sidebars/docs.js @@ -279,12 +279,7 @@ const sidebar = [ collapsed: false, items: [ { - label: "Creating a New GitLab Project with Pipelines", - type: "doc", - id: "2.0/docs/pipelines/installation/addingnewgitlabrepo", - }, - { - label: "Adding Pipelines to an Existing GitLab Project", + label: "Bootstrap Pipelines in a new GitLab Project", type: "doc", id: "2.0/docs/pipelines/installation/addinggitlabrepo", }, From 73800d9de603ef5fdb7559a4942fbf014f0f1a51 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 10:30:30 -0400 Subject: [PATCH 26/39] feat: Adding instructions for additional accounts and subscriptions --- .../pipelines/installation/addingnewrepo.mdx | 426 +++++++++++++++++- 1 file changed, 415 insertions(+), 11 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 4833bdc91a..4530e8842a 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -114,8 +114,9 @@ If you want to peruse the catalog that's used in the bootstrap process, you can The process that we'll follow to get these resources ready for Pipelines is: -1. Set up the Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines +1. Set up the Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines in a single AWS account 2. Use Terragrunt to provision these resources in your AWS account +3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines ### Bootstrap your `infrastructure-live` repository @@ -123,7 +124,7 @@ To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to sca ```bash boilerplate \ - --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ --output-folder . ``` @@ -137,7 +138,7 @@ e.g. ```bash boilerplate \ - --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ --output-folder . 
\ --var 'AccountName=dev' \ --var 'GitHubOrgName=acme' \ @@ -161,7 +162,7 @@ StateBucketName: my-state-bucket ```bash boilerplate \ - --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ --output-folder . \ --var-file vars.yml \ --non-interactive @@ -226,6 +227,104 @@ terragrunt run --all --non-interactive --provider-cache apply ::: +### Optional: Bootstrapping additional AWS accounts + +If you have multiple AWS accounts, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process. + +For each additional account you want to bootstrap, you'll use Boilerplate in the root of your `infrastructure-live` repository to scaffold out the necessary content for just that account. + +:::tip + +If you are going to bootstrap more AWS accounts, you'll probably want to commit your existing changes before proceeding. + +```bash +git add . +git commit -m "Add core Pipelines scaffolding [skip ci]" +``` + +The `[skip ci]` in the commit message is just in-case you push your changes up to your repository at this state, as you don't want to trigger Pipelines yet. + +::: + +Just like before, you'll use Boilerplate to scaffold out the necessary content for just that account. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \ + --output-folder . +``` + +:::tip + +Again, you can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=prod' \ + --var 'AWSAccountID=987654321012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-prod-state-bucket' \ + --var 'GitHubOrgName=acme' \ + --var 'GitHubRepoName=infrastructure-live' \ + --non-interactive +``` + +If you prefer to store the values in a YAML file and pass it to Boilerplate using the `--var-file` flag, you can do so like this: + +```yaml title="vars.yml" +AccountName: prod +AWSAccountID: 987654321012 +AWSRegion: us-east-1 +StateBucketName: my-prod-state-bucket +GitHubOrgName: acme +GitHubRepoName: infrastructure-live +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +::: + +:::note Progress Checklist + + + +::: + +Once you've scaffolded out the additional accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts. + +:::tip + +Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it. 
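For example, if you use IAM Identity Center (AWS SSO) profiles, authenticating might look like the sketch below. The `prod-admin` profile name is hypothetical; substitute a profile that maps to the account you're bootstrapping.

```bash
# Log in via IAM Identity Center and point the AWS CLI/SDKs at the matching profile.
# The profile name below is an example only.
aws sso login --profile prod-admin
export AWS_PROFILE=prod-admin

# Confirm you're operating in the account you expect before running Terragrunt.
aws sts get-caller-identity
```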
+ +::: + +For each account you want to bootstrap, you'll need to run the following commands: + +```bash +cd /_global/bootstrap +terragrunt run --all --non-interactive --provider-cache plan +terragrunt run --all --non-interactive --provider-cache apply +``` + +:::note Progress Checklist + + + + +::: + @@ -254,10 +353,11 @@ If you want to peruse the catalog that's used in the bootstrap process, you can The process that we'll follow to get these resources ready for Pipelines is: -1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository +1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines in a single Azure subscription 2. Use Terragrunt to provision these resources in your Azure subscription 3. Finalizing Terragrunt configurations using the bootstrap resources we just provisioned 4. Pull the bootstrap resources into state, now that we have configured a remote state backend +5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines ### Bootstrap your `infrastructure-live` repository @@ -265,7 +365,7 @@ To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to sca ```bash boilerplate \ - --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ --output-folder . ``` @@ -279,7 +379,7 @@ e.g. ```bash boilerplate \ - --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ --output-folder . \ --var 'AccountName=dev' \ --var 'GitHubOrgName=acme' \ @@ -289,7 +389,7 @@ boilerplate \ --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \ --var 'AzureLocation=East US' \ --var 'StateResourceGroupName=pipelines-rg' \ - --var 'StateStorageAccountName=my-storage-account' \ + --var 'StateStorageAccountName=mysa' \ --var 'StateStorageContainerName=tfstate' \ --non-interactive ``` @@ -310,7 +410,7 @@ StateStorageContainerName: tfstate ```bash boilerplate \ - --template-url 'https://github.com/gruntwork-io/terragrunt-scale-catalog.git//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \ --output-folder . \ --var-file vars.yml \ --non-interactive @@ -446,6 +546,44 @@ EOF Uncomment the section that defines the remote state backend, so that you can pull the resources provisioned so far into state. 
+```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) + + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name +} + +remote_state { + backend = "azurerm" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + resource_group_name = local.state_resource_group_name + storage_account_name = local.state_storage_account_name + container_name = local.state_storage_container_name + key = "${path_relative_to_include()}/tofu.tfstate" + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < @@ -462,8 +600,8 @@ environment "dev" { authentication { azure_oidc { - tenant_id = "11111111-1111-1111-1111-111111111111" - subscription_id = "00000000-0000-0000-0000-000000000000" + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "11111111-1111-1111-1111-111111111111" plan_client_id = "" # FIXME: Fill in the client ID for the plan application after bootstrapping apply_client_id = "" # FIXME: Fill in the client ID for the apply application after bootstrapping @@ -520,6 +658,272 @@ We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiti ::: +### Optional: Bootstrapping additional Azure subscriptions + +If you have multiple Azure subscriptions, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process. + +For each additional subscription you want to bootstrap, you'll use Boilerplate in the root of your `infrastructure-live` repository to scaffold out the necessary content for just that subscription. + +:::tip + +If you are going to bootstrap more Azure subscriptions, you'll probably want to commit your existing changes before proceeding. + +```bash +git add . +git commit -m "Add additional Azure subscriptions [skip ci]" +``` + +::: + +Just like before, you'll use Boilerplate to scaffold out the necessary content for just that subscription. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \ + --output-folder . +``` + +:::tip + +Again, you can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +::: + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \ + --output-folder . 
\ + --var 'AccountName=prod' \ + --var 'GitHubOrgName=acme' \ + --var 'GitHubRepoName=infrastructure-live' \ + --var 'SubscriptionName=prod' \ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \ + --var 'AzureSubscriptionID=99999999-9999-9999-9999-999999999999' \ + --var 'AzureLocation=East US' \ + --var 'StateResourceGroupName=pipelines-rg' \ + --var 'StateStorageAccountName=myprodsa' \ + --var 'StateStorageContainerName=tfstate' \ + --non-interactive +``` + +If you prefer to store the values in a YAML file and pass it to Boilerplate using the `--var-file` flag, you can do so like this: + +```yaml title="vars.yml" +AccountName: prod +GitHubOrgName: acme +GitHubRepoName: infrastructure-live +SubscriptionName: prod +AzureTenantID: 00000000-0000-0000-0000-000000000000 +AzureSubscriptionID: 99999999-9999-9999-9999-999999999999 +AzureLocation: East US +StateResourceGroupName: pipelines-rg +StateStorageAccountName: myprodsa +StateStorageContainerName: tfstate +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +:::note Progress Checklist + + + +::: + +To avoid issues with the remote state backend not existing yet, you'll want to comment out your remote state backend configurations in your `root.hcl` file before you start the bootstrap process for these new subscriptions. + +```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) + + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name +} + +# FIXME: Temporarily commented out again, pending successful bootstrap of the new subscription(s). +# +# remote_state { +# backend = "azurerm" +# generate = { +# path = "backend.tf" +# if_exists = "overwrite" +# } +# config = { +# resource_group_name = local.state_resource_group_name +# storage_account_name = local.state_storage_account_name +# container_name = local.state_storage_container_name +# key = "${path_relative_to_include()}/tofu.tfstate" +# } +# } + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +Just like before, you can use Terragrunt to provision the resources in each of these subscriptions. + +For each subscription you want to bootstrap, you'll need to run the following commands: + +```bash +cd /_global/bootstrap +terragrunt run --all --non-interactive --provider-cache plan +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply +``` + +:::tip + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + +::: + +:::note Progress Checklist + + + + +::: + +Next, you can pull the resources into state using the storage account we just provisioned. + +First, edit the `root.hcl` file in the root of your `infrastructure-live` repository to uncomment the remote state backend configurations you commented out earlier. 
+ +```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) + + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name +} + +remote_state { + backend = "azurerm" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + resource_group_name = local.state_resource_group_name + storage_account_name = local.state_storage_account_name + container_name = local.state_storage_container_name + key = "${path_relative_to_include()}/tofu.tfstate" + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +Next, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +``` + +:::tip + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + +::: + +:::note Progress Checklist + + + +::: + +Finally, we can edit each of the `.gruntwork/environment-.hcl` files in the root of your `infrastructure-live` repository to reference the IDs for the applications we just provisioned. + +```hcl title=".gruntwork/environment-.hcl" +environment "prod" { + filter { + paths = ["prod/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "99999999-9999-9999-9999-999999999999" + + plan_client_id = "" # FIXME: Fill in the client ID for the plan application after bootstrapping + apply_client_id = "" # FIXME: Fill in the client ID for the apply application after bootstrapping + } + } +} +``` + +You can find the values for the `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory in `name-of-subscription/bootstrap`. + +```bash +terragrunt stack output +``` + +The relevant bits that you want to extract from the stack output are the following: + +```hcl +bootstrap = { + apply_app = { + client_id = "55555555-5555-5555-5555-555555555555" + } + plan_app = { + client_id = "66666666-6666-6666-6666-666666666666" + } +} +``` + +You can use those values to set the values for `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file. + +:::note Progress Checklist + + + + +::: + From c29ecb3d9a7d49b9dd9c97899a59fff05901455f Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 10:37:07 -0400 Subject: [PATCH 27/39] fix: Preventing ToC from breaking by using h3 tags --- .../pipelines/installation/addingnewrepo.mdx | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 4530e8842a..3af96f9b28 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -93,6 +93,8 @@ mise ls-remote boilerplate ::: +### Cloud-specific bootstrap instructions + @@ -118,7 +120,8 @@ The process that we'll follow to get these resources ready for Pipelines is: 2. Use Terragrunt to provision these resources in your AWS account 3. 
(Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines -### Bootstrap your `infrastructure-live` repository +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Bootstrap your `infrastructure-live` repository

To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function. @@ -171,7 +174,9 @@ boilerplate \ ::: :::note Progress Checklist + + ::: Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu): @@ -185,7 +190,8 @@ mise install ::: -### Provisioning the resources +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Provisioning the resources

Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. @@ -227,7 +233,8 @@ terragrunt run --all --non-interactive --provider-cache apply ::: -### Optional: Bootstrapping additional AWS accounts +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Optional: Bootstrapping additional AWS accounts

If you have multiple AWS accounts, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process. @@ -359,7 +366,8 @@ The process that we'll follow to get these resources ready for Pipelines is: 4. Pull the bootstrap resources into state, now that we have configured a remote state backend 5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines -### Bootstrap your `infrastructure-live` repository +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Bootstrap your `infrastructure-live` repository

To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function. @@ -428,7 +436,8 @@ Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root mise install ``` -### Provisioning the resources +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Provisioning the resources

Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription. @@ -498,7 +507,8 @@ We're adding the `--no-stack-generate` flag here, as Terragrunt will already hav ::: -### Finalizing Terragrunt configurations +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Finalizing Terragrunt configurations

Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. @@ -638,7 +648,8 @@ You can use those values to set the values for `plan_client_id` and `apply_clien ::: -### Pulling the resources into state +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Pulling the resources into state

Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. @@ -658,7 +669,8 @@ We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiti ::: -### Optional: Bootstrapping additional Azure subscriptions +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Optional: Bootstrapping additional Azure subscriptions

If you have multiple Azure subscriptions, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process. From da545d0f9f1185945a95a1461e86ae06f5ef8fdd Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 10:48:37 -0400 Subject: [PATCH 28/39] fix: Adding existing guide docs --- .../installation/addingexistingrepo.mdx | 478 ++++++++++++++---- 1 file changed, 372 insertions(+), 106 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx index 3baf0e8a58..5b25a87270 100644 --- a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx @@ -94,9 +94,9 @@ There are two ways to configure SCM access for Pipelines: ::: -## Bootstrapping Core Infrastructure +## Bootstrapping Cloud Infrastructure -If your cloud accounts/subscriptions don't already have all the required OIDC and state management resources, you'll need to bootstrap them. This section provides the infrastructure code needed to set up these resources. +If your AWS accounts / Azure subscriptions don't already have all the required OIDC and state management resources, you'll need to bootstrap them. This section provides the infrastructure code needed to set up these resources. :::tip @@ -106,11 +106,77 @@ If you have some of them provisioned, but not all, you can decide to either dest ::: +### Prepare Your Repository + +Clone your repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository) if you haven't already. + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: + +For example: + +```bash +git clone git@github.com:acme/infrastructure-live.git +cd infrastructure-live +``` + +:::note Progress Checklist + + + + +::: + +To bootstrap your repository, we'll use Boilerplate to scaffold it with the necessary IaC code to provision the infrastructure necessary for Pipelines to function. + +The easiest way to install Boilerplate is to use `mise` to install it. + +:::tip + +If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html). + +::: + +```bash +mise use -g boilerplate@latest +``` + +:::tip + +If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions. + +```bash +mise ls-remote boilerplate +``` + +::: + +:::note Progress Checklist + + + +::: + +If you don't already have Terragrunt and OpenTofu installed locally, you can install them using `mise`: + +```bash +mise use -g terragrunt@latest opentofu@latest +``` + +:::note Progress Checklist + + + +::: + +### Cloud-specific bootstrap instructions + -### AWS Bootstrap Resources - The resources you need provisioned in AWS to start managing resources with Pipelines are: 1. An OpenID Connect (OIDC) provider @@ -127,7 +193,14 @@ If you want to peruse the catalog that's used in the bootstrap process, you can ::: -### Bootstrap Your Repository for AWS +The process that we'll follow to get these resources ready for Pipelines is: + +1. Use Boilerplate to scaffold bootstrap configurations in your repository for each AWS account +2. Use Terragrunt to provision these resources in your AWS accounts +3. 
(Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Bootstrap Your Repository for AWS

First, confirm that you have a `root.hcl` file in the root of your repository that looks something like this: @@ -168,65 +241,94 @@ EOF If you don't have a `root.hcl` file, you might need to customize the bootstrapping process, as the Terragrunt scale catalog expects a `root.hcl` file in the root of the repository. Please contact [Gruntwork support](/support) for assistance if you need help. -For each AWS account that needs bootstrapping, create the following structure in your repository: +For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. -```hcl title="/_global/bootstrap/terragrunt.stack.hcl" -locals { - # You may need to adjust this path based on your repository structure - account_vars = read_terragrunt_config(find_in_parent_folders("account.hcl", "terragrunt.hcl")) -} +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \ + --output-folder . +``` -stack "bootstrap" { - source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/github/pipelines-bootstrap?ref=v1.0.0" - path = "bootstrap" +:::tip - values = { - # Set the OIDC resource prefix you want to use for your account. - # This is used to determine the names of the OIDC resources like the IAM roles that are created. - # e.g. `pipelines-plan`, `pipelines-apply`, etc. - oidc_resource_prefix = "pipelines" +You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. - # Set the organization name you want AWS to trust for OIDC. - github_org_name = "your-github-org" +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. - # Set the repository name you want AWS to trust for OIDC. - github_repo_name = "your-repo-name" +e.g. - # Set the name of the S3 bucket you want to use for state storage (must be globally unique) - state_bucket_name = "your-unique-state-bucket-name" - } -} +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitHubOrgName=acme' \ + --var 'GitHubRepoName=infrastructure-live' \ + --var 'AWSAccountID=123456789012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-state-bucket' \ + --non-interactive +``` + +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + +```yaml title="vars.yml" +AccountName: dev +GitHubOrgName: acme +GitHubRepoName: infrastructure-live +AWSAccountID: 123456789012 +AWSRegion: us-east-1 +StateBucketName: my-state-bucket +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive ``` +::: + :::note Progress Checklist - - - + ::: -### Provision AWS Bootstrap Resources +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Provision AWS Bootstrap Resources
</h3>

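Before running the commands in this section, it's worth confirming that your shell credentials resolve to the AWS account you intend to bootstrap. For example (a sketch; the profile name is a placeholder):

```bash
# Confirm which account and principal your current credentials resolve to
aws sts get-caller-identity

# If you use named profiles, select the one for the target account first, e.g.:
export AWS_PROFILE=name-of-account
aws sts get-caller-identity
```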
-For each account that needs bootstrapping: +Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts. -1. Navigate to the bootstrap directory: +:::tip - ```bash - cd /_global/bootstrap - ``` +Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it. -2. Plan the bootstrap resources: +You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role. - ```bash - terragrunt run --all --non-interactive plan - ``` +::: -3. Apply the bootstrap resources: +For each account you want to bootstrap, you'll need to run the following commands: - ```bash - terragrunt run --all --non-interactive apply - ``` +First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the AWS account you want to bootstrap. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +Next, apply the changes to your account. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache apply +``` :::note Progress Checklist @@ -238,15 +340,20 @@ For each account that needs bootstrapping:
-### Azure Bootstrap Resources - The resources you need provisioned in Azure to start managing resources with Pipelines are: 1. An Azure Resource Group for OpenTofu state resources -2. An Azure Storage Account and Container for OpenTofu state storage -3. Entra ID Applications for plan and apply operations -4. Federated Identity Credentials for OIDC authentication -5. Service Principals with appropriate role assignments + 1. An Azure Storage Account in that resource group for OpenTofu state storage + 1. An Azure Storage Container in that storage account for OpenTofu state storage +2. An Entra ID Application to use for plans + 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + 2. A role assignment for the service principal to access the Azure Storage Account +3. An Entra ID Application to use for applies + 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription :::tip Don't Panic! @@ -256,94 +363,253 @@ If you want to peruse the catalog that's used in the bootstrap process, you can ::: -### Bootstrap Your Repository for Azure +The process that we'll follow to get these resources ready for Pipelines is: -For each Azure subscription that needs bootstrapping, create the following structure: +1. Use Boilerplate to scaffold bootstrap configurations in your repository for each Azure subscription +2. Use Terragrunt to provision these resources in your Azure subscription +3. Finalizing Terragrunt configurations using the bootstrap resources we just provisioned +4. Pull the bootstrap resources into state, now that we have configured a remote state backend +5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines -```hcl title="/bootstrap/terragrunt.stack.hcl" -stack "bootstrap" { - source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/github/pipelines-bootstrap?ref=v1.0.0" - path = "bootstrap" +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Bootstrap Your Repository for Azure
</h3>

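The Boilerplate command in this section scaffolds per-subscription configurations. As a rough orientation, the resulting layout looks something like this (directory and file names are assumptions based on the template defaults; yours may differ):

```text
name-of-subscription/
├── sub.hcl                    # state resource group / storage account / container names
└── bootstrap/
    └── terragrunt.stack.hcl   # references the terragrunt-scale-catalog bootstrap stack
```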
- values = { - # Set the location for your resources - location = "East US" +For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. - # State storage configuration - state_resource_group_name = "pipelines-rg" - state_storage_account_name = "your-unique-storage-account" - state_storage_container_name = "tfstate" +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \ + --output-folder . +``` - # OIDC configuration - github_org_name = "your-github-org" - github_repo_name = "your-repo-name" - oidc_resource_prefix = "pipelines" - } -} +:::tip + +You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitHubOrgName=acme' \ + --var 'GitHubRepoName=infrastructure-live' \ + --var 'SubscriptionName=dev' \ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \ + --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \ + --var 'AzureLocation=East US' \ + --var 'StateResourceGroupName=pipelines-rg' \ + --var 'StateStorageAccountName=mysa' \ + --var 'StateStorageContainerName=tfstate' \ + --non-interactive ``` +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + +```yaml title="vars.yml" +AccountName: dev +GitHubOrgName: acme +GitHubRepoName: infrastructure-live +SubscriptionName: dev +AzureTenantID: 00000000-0000-0000-0000-000000000000 +AzureSubscriptionID: 11111111-1111-1111-1111-111111111111 +AzureLocation: East US +StateResourceGroupName: pipelines-rg +StateStorageAccountName: my-storage-account +StateStorageContainerName: tfstate +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +::: + :::note Progress Checklist - - - - + ::: -### Provision Azure Bootstrap Resources +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Provision Azure Bootstrap Resources
</h3>

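This section exports `ARM_TENANT_ID` and `ARM_SUBSCRIPTION_ID` by hand. If you're already logged in with `az login`, one way to derive them from your current context is a sketch like the following:

```bash
# Assumes you've already run `az login` and selected the target subscription
export ARM_TENANT_ID="$(az account show --query tenantId --output tsv)"
export ARM_SUBSCRIPTION_ID="$(az account show --query id --output tsv)"

# Double-check the values before provisioning anything
echo "$ARM_TENANT_ID" "$ARM_SUBSCRIPTION_ID"
```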
+ +Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in your Azure subscription. + +If you haven't already, you'll want to authenticate to Azure using the `az` CLI. + +```bash +az login +``` + +:::note Progress Checklist + + + +::: -For each subscription that needs bootstrapping: -1. Set the environment variables used by the Azure provider to authenticate in a given subscription: +To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: - ```bash - export ARM_TENANT_ID="your-tenant-id" - export ARM_SUBSCRIPTION_ID="your-subscription-id" - ``` +- `ARM_TENANT_ID` +- `ARM_SUBSCRIPTION_ID` -2. Navigate to the bootstrap directory: +For example: - ```bash - cd /bootstrap - ``` +```bash +export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" +export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" +``` -3. Plan the bootstrap resources: +:::note Progress Checklist - ```bash - terragrunt run --all --non-interactive --provider-cache plan - ``` + + +::: - :::tip +First, make sure that everything is set up correctly by running a plan in the subscription directory. - We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache plan +``` - ::: +:::tip -4. Apply the bootstrap resources: +We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). - ```bash - terragrunt run --all --non-interactive --provider-cache apply - ``` +::: -5. Migrate state to remote storage: +:::note Progress Checklist - ```bash - terragrunt run --all --non-interactive --provider-cache -- init -migrate-state -force-copy - ``` + - :::tip +::: - We're using the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. +Next, apply the changes to your subscription. - ::: +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply +``` + +:::tip + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + +::: + +:::note Progress Checklist + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Finalizing Terragrunt configurations
</h3>

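By the end of this section you'll plug the plan and apply application client IDs into your Pipelines environment configuration. As a preview, a finished file might look like the following sketch (the environment name, paths, and all IDs are placeholders):

```hcl title=".gruntwork/environment-an-azure-subscription.hcl"
environment "an_azure_subscription" {
  filter {
    paths = ["an-azure-subscription/*"]
  }

  authentication {
    azure_oidc {
      tenant_id       = "00000000-0000-0000-0000-000000000000"
      subscription_id = "11111111-1111-1111-1111-111111111111"

      # Client IDs come from `terragrunt stack output` in the bootstrap directory
      plan_client_id  = "44444444-4444-4444-4444-444444444444"
      apply_client_id = "33333333-3333-3333-3333-333333333333"
    }
  }
}
```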
+ +Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. + +First, edit the `root.hcl` file in the root of your repository to leverage the storage account we just provisioned. + +If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this: + +```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) + + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name +} + +remote_state { + backend = "azurerm" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + resource_group_name = local.state_resource_group_name + storage_account_name = local.state_storage_account_name + container_name = local.state_storage_container_name + key = "${path_relative_to_include()}/tofu.tfstate" + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +Next, finalize the `.gruntwork/environment-.hcl` file in the root of your repository to reference the IDs for the applications we just provisioned. + +You can find the values for the `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory in `name-of-subscription/bootstrap`. + +```bash +terragrunt stack output +``` + +The relevant bits that you want to extract from the stack output are the following: + +```hcl +bootstrap = { + apply_app = { + client_id = "33333333-3333-3333-3333-333333333333" + } + plan_app = { + client_id = "44444444-4444-4444-4444-444444444444" + } +} +``` + +You can use those values to set the values for `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file. + +:::note Progress Checklist + + + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Pulling the resources into state
</h3>

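After the state migration in this section completes, you can optionally confirm that the state objects landed in the container. For example (a sketch; the account and container names are whatever you configured in `sub.hcl`, and listing blobs this way requires data-plane RBAC such as Storage Blob Data Reader on the storage account):

```bash
az storage blob list \
  --account-name "name-of-storage-account-you-want-to-use-for-state" \
  --container-name "tfstate" \
  --auth-mode login \
  --output table
```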
+ +Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +``` + +:::tip + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + +::: :::note Progress Checklist - - - - + ::: From a886280b48904d0746ed446fc4aac1517171646f Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 11:00:50 -0400 Subject: [PATCH 29/39] fix: Redoing GitLab install instructions for parity with GitHub --- .../installation/addinggitlabrepo.md | 838 ------------------ .../installation/addinggitlabrepo.mdx | 463 ++++++++++ 2 files changed, 463 insertions(+), 838 deletions(-) delete mode 100644 docs/2.0/docs/pipelines/installation/addinggitlabrepo.md create mode 100644 docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md deleted file mode 100644 index 6085165183..0000000000 --- a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md +++ /dev/null @@ -1,838 +0,0 @@ -# Bootstrap Pipelines in a New GitLab Repository - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import PersistentCheckbox from '/src/components/PersistentCheckbox'; - -To configure Gruntwork Pipelines in a new GitLab repository, complete the following steps (which are explained in detail below): - -1. Create an `infrastructure-live` repository. -2. Configure machine user tokens for GitLab access, or ensure that the appropriate machine user tokens are set up as repository or organization secrets. -3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. -4. Create `.gitlab-ci.yml` to tell your GitLab CI/CD pipeline how to run your pipelines. -5. Commit and push your changes to your repository. - -## Creating the infrastructure-live repository - -Creating an `infrastructure-live` repository is fairly straightforward. First, create a new repository using the official GitLab documentation for [creating repositories](https://docs.gitlab.com/user/project/repository/). Name the repository something like `infrastructure-live` and make it private (or internal). - -Clone the repository to your local machine using [Git](https://docs.gitlab.com/user/project/repository/index.html#clone-a-repository). - -:::tip - -If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). - -::: - -For example: - -```bash -git clone git@gitlab.com:acme/infrastructure-live.git -``` - -:::note Progress Checklist - - - -::: - -Once the repository is cloned locally, you'll want to create a `.mise.toml` file in the root of the repository to tell Pipelines what versions of Terragrunt and OpenTofu to use. - -For example: - -```toml title=".mise.toml" -[tools] -terragrunt = "0.88.0" -opentofu = "1.10.6" -``` - -:::tip - -Follow the official [mise installation guide](https://mise.jdx.dev/getting-started.html) to install it locally. - -You can get `mise` to lookup the versions available for a given tool by using the `ls-remote` command. 
- - ```bash -mise ls-remote terragrunt -mise ls-remote opentofu -``` - -::: - -Next, install Terragrunt and OpenTofu locally: - - ```bash -mise install -``` - -:::note Progress Checklist - - - -::: - -## Configuring SCM Access - -Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). - -For GitLab, you'll need to configure SCM access using [machine users](/2.0/docs/pipelines/installation/viamachineusers.md#gitlab) with appropriate Personal Access Tokens (PATs). - -:::note Progress Checklist - - - -::: - -## Creating Cloud Resources for Pipelines - -To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. - -:::note - -If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. - -::: - -This guide will assume a blank slate, so you can start by creating a new Git repository to track the infrastructure that you're provisioning here. - -:::tip - -If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). - -::: - - - - -The resources that you need provisioned in AWS to start managing resources with Pipelines are: - -1. An OpenID Connect (OIDC) provider -2. An IAM role for Pipelines to assume when running Terragrunt plan commands -3. An IAM role for Pipelines to assume when running Terragrunt apply commands - -For every account you want Pipelines to manage infrastructure in. - -:::tip Don't Panic! - -This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. - -If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. - -::: - -The process that we'll follow to get these resources ready for Pipelines is: - -1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository -2. 
Use Terragrunt to provision these resources in your AWS account - -### Bootstrap your `infrastructure-live` repository - -To bootstrap your AWS account for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: - -```hcl title="root.hcl" -locals { - account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) - state_bucket_name = local.account_hcl.locals.state_bucket_name - - region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) - aws_region = local.region_hcl.locals.aws_region -} - -remote_state { - backend = "s3" - generate = { - path = "backend.tf" - if_exists = "overwrite" - } - config = { - bucket = local.state_bucket_name - region = local.aws_region - key = "${path_relative_to_include()}/tofu.tfstate" - encrypt = true - use_lockfile = true - } -} - -generate "provider" { - path = "provider.tf" - if_exists = "overwrite_terragrunt" - contents = < - -::: - -This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers and remote state configuration. - -```hcl title="name-of-account/account.hcl" -locals { - // This is the name of the S3 bucket that will be created for state storage. - // - // Make sure this is globally unique across all AWS accounts, as S3 bucket names must be globally unique. - // You will need to change this. - state_bucket_name = "your-unique-bucket-name-for-state" -} -``` - -:::note Progress Checklist - - - - - -::: - -This file is used by all units in the `name-of-account` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your account. - -```hcl title="name-of-account/_global/region.hcl" -locals { - aws_region = "us-east-1" -} -``` - -:::tip - -This region configuration is being set because the AWS API needs to make API calls to _some_ AWS region, but all the resources are, in fact, global. - -The AWS IAM service is a global service, which is why we're storing the bootstrap resources in the `_global` directory. - -::: - -:::note Progress Checklist - - - - - -::: - -This file is used by all units in the `_global` directory to ensure that Terragrunt configurations know which AWS region to use for the OpenTofu AWS provider configuration. - -```hcl title="name-of-account/_global/bootstrap/terragrunt.stack.hcl" -locals { - // Read from parent configurations instead of defining these values locally - // so that other stacks and units in this directory can reuse the same configurations. - account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) -} - -stack "bootstrap" { - source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/aws/gitlab/pipelines-bootstrap?ref=v1.0.0" - path = "bootstrap" - - values = { - // Set the OIDC resource prefix you want to use for your account. - // - // This will be used to determine the names of the OIDC resources like the IAM roles that are created. - // e.g. `pipelines-plan`, `pipelines-apply`, etc. - oidc_resource_prefix = "pipelines" - - // Set the GitLab group name you want AWS to trust for OIDC. - gitlab_group_name = "acme" - - // Set the repository name you want AWS to trust for OIDC. - gitlab_repo_name = "infrastructure-live" - - // Set the GitLab instance URL (use https://gitlab.com for GitLab.com) - gitlab_instance_url = "https://gitlab.com" - - // Read from parent configurations instead of defining these values locally. 
- state_bucket_name = local.account_hcl.locals.state_bucket_name - } -} -``` - -:::note Progress Checklist - - - - - - - - -::: - -You'll also want to make sure that you add the `aws` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with AWS for the bootstrapping process. - -```toml title=".mise.toml" -[tools] -# The Terragrunt and OpenTofu entries should already be present... -awscli = "2.31.6" -``` - -:::tip - -Remember that you can use `ls-remote` to list the available versions of the `awscli` tool. - - ```bash -mise ls-remote awscli -``` - -::: - -Make sure to run `mise install` to install the `awscli` tool. - - ```bash -mise install - ``` - -If you haven't already, you'll want to authenticate to AWS using the `aws` CLI. - - ```bash -aws configure -``` - -:::note Progress Checklist - - - - -::: - -### Provisioning the resources - -Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. - -First, make sure that everything is set up correctly by running a plan in the bootstrap directory. - -```bash title="name-of-account/_global/bootstrap" -terragrunt run --all --non-interactive --provider-cache plan -``` - -:::tip - -We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). - -::: - -Next, apply the changes to your account. - -```bash title="name-of-account/_global/bootstrap" -terragrunt run --all --non-interactive --provider-cache apply -``` - -:::note Progress Checklist - - - -::: - -:::tip Troubleshooting Tips - -If you encounter issues during this step, please refer to the [AWS Initial Apply Failure](#aws-initial-apply-failure) section. - -::: - - - - -The resources that you need provisioned in Azure to start managing resources with Pipelines are: - -1. An Azure Resource Group for OpenTofu state resources - 1. An Azure Storage Account in that resource group for OpenTofu state storage - 1. An Azure Storage Container in that storage account for OpenTofu state storage -2. An Entra ID Application to use for plans - 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch - 2. A Service Principal for the application to be used in role assignments - 1. A role assignment for the service principal to access the Azure subscription - 2. A role assignment for the service principal to access the Azure Storage Account -3. An Entra ID Application to use for applies - 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch - 2. A Service Principal for the application to be used in role assignments - 1. A role assignment for the service principal to access the Azure subscription - -:::tip Don't Panic! - -This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. - -If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. - -::: - -The process that we'll follow to get these resources ready for Pipelines is: - -1. 
Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository -2. Use Terragrunt to provision these resources in your Azure subscription -3. Pull the bootstrap resources into state, using the storage account we just provisioned - -### Bootstrap your Azure `infrastructure-live` repository - -To bootstrap your Azure subscription for use with Pipelines, you'll want to add the following files to your `infrastructure-live` repository: - -```hcl title="root.hcl" -generate "provider" { - path = "provider.tf" - if_exists = "overwrite" - contents = < - -::: - -This file is used by all units in your `infrastructure-live` repository to ensure that the OpenTofu modules generated by your Terragrunt units use the appropriate providers. - -```hcl title="name-of-subscription/sub.hcl" -locals { - // This is the name of the resource group that will be created for state storage. - // - // You don't need to change this if you don't want to (and you don't already have a resource group named this). - state_resource_group_name = "pipelines-rg" - - // Make sure this is less than 24 characters, and only contains lowercase letters and numbers - // to obey Azure's naming requirements. - // - // You will need to change this. - state_storage_account_name = "name-of-storage-account-you-want-to-use-for-state" - - // This is the name of the container you'll use for state storage. - // - // You don't need to change this if you don't want to. - state_storage_container_name = "tfstate" -} -``` - -:::note Progress Checklist - - - - - - -::: - -This file is used by all units in the `name-of-subscription` directory to ensure that Terragrunt configurations have access to the information pertinent to the state resources you want to use for your subscription. - -```hcl title="name-of-subscription/bootstrap/terragrunt.stack.hcl" -locals { - // Read from parent configurations instead of defining these values locally - // so that other stacks and units in this directory can reuse the same configurations. - sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) -} - -stack "bootstrap" { - source = "github.com/gruntwork-io/terragrunt-scale-catalog//stacks/azure/gitlab/pipelines-bootstrap?ref=v1.0.0" - path = "bootstrap" - - values = { - // Set the location to the location you want to bootstrap your subscription in. - location = "East US" - - // Read from parent configurations instead of defining these values locally. - state_resource_group_name = local.sub_hcl.locals.state_resource_group_name - state_storage_account_name = local.sub_hcl.locals.state_storage_account_name - state_storage_container_name = local.sub_hcl.locals.state_storage_container_name - - // Set the GitLab group name you want Azure to trust for OIDC. - gitlab_group_name = "acme" - - // Set the repository name you want Azure to trust for OIDC. - gitlab_repo_name = "infrastructure-live" - - // Set the GitLab instance URL (use https://gitlab.com for GitLab.com) - gitlab_instance_url = "https://gitlab.com" - - // Set the OIDC resource prefix you want to use for your subscription. - // - // This will be used to determine the names of the OIDC resources like the Entra ID Applications that are created. - // e.g. `pipelines`-plan, `pipelines`-apply, etc. 
- oidc_resource_prefix = "pipelines" - } -} -``` - -:::note Progress Checklist - - - - - - - - -::: - -You'll also want to make sure that you add the `azure` CLI to your `.mise.toml` file, as you'll be using it to authenticate locally with Azure for the bootstrapping process. - -```toml title=".mise.toml" -[tools] -# The Terragrunt and OpenTofu entries should already be present... -azure-cli = "2.77.0" -``` - -:::tip - -Remember that you can use `ls-remote` to list the available versions of the `azure-cli` tool. - - ```bash -mise ls-remote azure-cli - ``` - -::: - -Make sure to run `mise install` to install the `azure-cli` tool. - - ```bash -mise install - ``` - -If you haven't already, you'll want to authenticate to Azure using the `az` CLI. - - ```bash -az login -``` - -:::note Progress Checklist - - - - -::: - -### Provisioning the Azure resources - -Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription. - -To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: - -- `ARM_TENANT_ID` -- `ARM_SUBSCRIPTION_ID` - -For example: - - ```bash -export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" -export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" -``` - -:::note Progress Checklist - - - -::: - -First, make sure that everything is set up correctly by running a plan in the subscription directory. - -```bash title="name-of-subscription" -terragrunt run --all --non-interactive --provider-cache plan -``` - -:::tip - -We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process. - -::: - -Next, apply the changes to your subscription. - -```bash title="name-of-subscription" -terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply -``` - -:::tip - -We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. - -::: - -:::note Progress Checklist - - - -::: - -:::tip Troubleshooting Tips - -If you encounter issues during this step, please refer to the [Initial Apply Failure](#azure-initial-apply-failure) section. - -::: - -### Pulling the resources into state - -Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. - -```bash title="name-of-subscription" -terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy -``` - -:::tip - -We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. - -::: - -:::note Progress Checklist - - - -::: - - - - -## Creating `.gruntwork` HCL configurations - -Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). - -### The `repository` block - -The core configuration that you'll want to start with is the `repository` block. 
This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. - -```hcl title=".gruntwork/repository.hcl" -repository { - deploy_branch_name = "main" -} -``` - -:::note Progress Checklist - - - - -::: - -### The `environment` block - -Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). - -For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. - - - - -```hcl title=".gruntwork/environment-an-aws-account.hcl" -environment "an_aws_account" { - filter { - paths = ["an-aws-account/*"] - } - - authentication { - aws_oidc { - account_id = "123456789012" - plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" - apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" - } - } -} -``` - -:::tip - -Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. - -::: - -:::tip - -Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. - -::: - -:::note Progress Checklist - - - - - - - - -::: - - - - -```hcl title=".gruntwork/environment-an-azure-subscription.hcl" -environment "an_azure_subscription" { - filter { - paths = ["an-azure-subscription/*"] - } - - authentication { - azure_oidc { - tenant_id = "00000000-0000-0000-0000-000000000000" - subscription_id = "11111111-1111-1111-1111-111111111111" - - plan_client_id = "33333333-3333-3333-3333-333333333333" - apply_client_id = "44444444-4444-4444-4444-444444444444" - } - } -} -``` - -:::tip - -Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. - -::: - -:::note Progress Checklist - - - - - - - - - - -::: - - - - -```hcl title=".gruntwork/environment-dev.hcl" -environment "dev" { - filter { - paths = ["dev/*"] - } - - authentication { - custom { - auth_provider_cmd = "./scripts/custom-auth-dev.sh" - } - } -} -``` - -:::tip - -Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. 
- -::: - -:::note Progress Checklist - - - - - - - - -::: - - - - -## Creating `.gitlab-ci.yml` - -Create a `.gitlab-ci.yml` file in the root of your `infrastructure-live` repository with the following content: - -```yaml title=".gitlab-ci.yml" -include: - - component: gitlab.com/gruntwork-io/pipelines-workflows/gitlab-ci@v4 - inputs: - stage: pipelines -``` - -:::info - -**For custom GitLab instances only**: If you are using a custom GitLab instance, you must update the component reference to point to your forked version of the pipelines-workflows project: - -```yaml title=".gitlab-ci.yml" -include: - - component: your-gitlab-instance.com/your-group/pipelines-workflows/gitlab-ci@v4 - inputs: - stage: pipelines -``` - -::: - -:::tip - -You can read the [Pipelines GitLab CI Component](https://gitlab.com/gruntwork-io/pipelines-workflows/-/blob/main/templates/gitlab-ci.yml) to learn how this GitLab CI component calls the Pipelines CLI to run your pipelines. - -::: - -:::note Progress Checklist - - - - -::: - -## Commit and push your changes - -Commit and push your changes to your repository. - - :::note - -You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow. - -::: - -```bash -git add . -git commit -m "Add Pipelines GitLab CI workflow [skip ci]" -git push -``` - -:::note Progress Checklist - - - - -::: - -🚀 You've successfully added Gruntwork Pipelines to your new repository! - -## Next steps - -You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. - -## Troubleshooting Tips - -If you encounter one of the following issues, please refer to the troubleshooting guidance for each scenario. - -### AWS Initial Apply Failure - -If your initial apply fails, follow these steps to troubleshoot the issue: - - - - - - - - -### Azure Initial Apply Failure - -If your initial apply fails, follow these steps to troubleshoot the issue: - - - - - - - - - -### GitLab CI/CD Issues - -If you encounter issues with GitLab CI/CD: - - - - - diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx new file mode 100644 index 0000000000..7055af090c --- /dev/null +++ b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx @@ -0,0 +1,463 @@ +# Bootstrap Pipelines in a New GitLab Project + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import PersistentCheckbox from '/src/components/PersistentCheckbox'; + +To configure Gruntwork Pipelines in a new GitLab project, complete the following steps (which are explained in detail below): + +1. Create an `infrastructure-live` project. +2. Configure machine user tokens for GitLab access, or ensure that the appropriate machine user tokens are set up as project or organization secrets. +3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. +4. Create `.gitlab-ci.yml` to tell your GitLab CI/CD pipeline how to run your pipelines. +5. Commit and push your changes to your project. + +## Creating the infrastructure-live project + +Creating an `infrastructure-live` project is fairly straightforward. First, create a new project using the official GitLab documentation for [creating repositories](https://docs.gitlab.com/user/project/repository/). 
Name the project something like `infrastructure-live` and make it private (or internal). + +## Configuring SCM Access + +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). + +For GitLab, you'll need to configure SCM access using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs). + +:::note Progress Checklist + + + +::: + +## Creating Cloud Resources for Pipelines + +To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines. + +:::note + +If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this. + +::: + +Clone your `infrastructure-live` project repository to your local machine using [Git](https://docs.gitlab.com/user/project/repository/index.html#clone-a-repository). + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: + +For example: + +```bash +git clone git@gitlab.com:acme/infrastructure-live.git +cd infrastructure-live +``` + +:::note Progress Checklist + + + + +::: + +To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary IaC code to provision the infrastructure necessary for Pipelines to function. + +The easiest way to install Boilerplate is to use `mise` to install it. + +:::tip + +If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html). + +::: + +```bash +mise use -g boilerplate@latest +``` + +:::tip + +If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions. + +```bash +mise ls-remote boilerplate +``` + +::: + +:::note Progress Checklist + + + +::: + +### Cloud-specific bootstrap instructions + +The resources that you need provisioned in AWS to start managing resources with Pipelines are: + +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands + +For every account you want Pipelines to manage infrastructure in. + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Set up the Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines in a single AWS account +2. Use Terragrunt to provision these resources in your AWS account +3. 
(Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Bootstrap your `infrastructure-live` repository
</h3>

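The scaffolding below produces roughly the following layout for the first account (a sketch; names depend on the values you pass to Boilerplate):

```text
.
├── .mise.toml                         # pins the Terragrunt and OpenTofu versions
├── root.hcl                           # S3 remote state + AWS provider generation
└── name-of-account/
    ├── account.hcl                    # state bucket name for this account
    └── _global/
        ├── region.hcl                 # region used for the global (IAM/OIDC) resources
        └── bootstrap/
            └── terragrunt.stack.hcl   # references the terragrunt-scale-catalog bootstrap stack
```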
+ +To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \ + --output-folder . +``` + +:::tip + +You can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitLabGroupName=acme' \ + --var 'GitLabRepoName=infrastructure-live' \ + --var 'GitLabInstanceURL=https://gitlab.com' \ + --var 'AWSAccountID=123456789012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-state-bucket' \ + --non-interactive +``` + +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + +```yaml title="vars.yml" +AccountName: dev +GitLabGroupName: acme +GitLabRepoName: infrastructure-live +GitLabInstanceURL: https://gitlab.com +AWSAccountID: 123456789012 +AWSRegion: us-east-1 +StateBucketName: my-state-bucket +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +::: + +:::note Progress Checklist + + + +::: + +Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu): + +```bash +mise install +``` + +:::note Progress Checklist + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Provisioning the resources
</h3>

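Once the apply in this section finishes, you can sanity-check the bootstrap resources from the CLI. For example (a sketch; the role names assume the default `pipelines` OIDC resource prefix):

```bash
# The GitLab OIDC provider should now exist in the account
aws iam list-open-id-connect-providers

# The plan and apply roles should exist (names depend on your oidc_resource_prefix)
aws iam get-role --role-name pipelines-plan --query 'Role.Arn'
aws iam get-role --role-name pipelines-apply --query 'Role.Arn'
```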
+ +Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account. + +:::tip + +Make sure that you're authenticated with AWS locally before proceeding. + +You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role. + +::: + +First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the first AWS account you want to bootstrap. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +:::note Progress Checklist + + + +::: + +Next, apply the changes to your account. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache apply +``` + +:::note Progress Checklist + + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>
Optional: Bootstrapping additional AWS accounts
</h3>

+ +If you have multiple AWS accounts, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process. + +For each additional account you want to bootstrap, you'll use Boilerplate in the root of your `infrastructure-live` repository to scaffold out the necessary content for just that account. + +:::tip + +If you are going to bootstrap more AWS accounts, you'll probably want to commit your existing changes before proceeding. + +```bash +git add . +git commit -m "Add core Pipelines scaffolding [skip ci]" +``` + +The `[skip ci]` in the commit message is just in-case you push your changes up to your repository at this state, as you don't want to trigger Pipelines yet. + +::: + +Just like before, you'll use Boilerplate to scaffold out the necessary content for just that account. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \ + --output-folder . +``` + +:::tip + +Again, you can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=prod' \ + --var 'AWSAccountID=987654321012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-prod-state-bucket' \ + --var 'GitLabGroupName=acme' \ + --var 'GitLabRepoName=infrastructure-live' \ + --var 'GitLabInstanceURL=https://gitlab.com' \ + --non-interactive +``` + +If you prefer to store the values in a YAML file and pass it to Boilerplate using the `--var-file` flag, you can do so like this: + +```yaml title="vars.yml" +AccountName: prod +AWSAccountID: 987654321012 +AWSRegion: us-east-1 +StateBucketName: my-prod-state-bucket +GitLabGroupName: acme +GitLabRepoName: infrastructure-live +GitLabInstanceURL: https://gitlab.com +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +::: + +:::note Progress Checklist + + + +::: + +Once you've scaffolded out the additional accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts. + +:::tip + +Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it. + +::: + +For each account you want to bootstrap, you'll need to run the following commands: + +```bash +cd /_global/bootstrap +terragrunt run --all --non-interactive --provider-cache plan +terragrunt run --all --non-interactive --provider-cache apply +``` + +:::note Progress Checklist + + + + +::: + +## Creating `.gruntwork` HCL configurations + +Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). 
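As a rough sketch, by the end of this section the `.gruntwork` directory will contain one file of repository-level settings plus one file per environment, along these lines:

```text
.gruntwork/
├── repository.hcl                   # deploy branch configuration
└── environment-an-aws-account.hcl   # one environment block (with auth) per AWS account
```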
+ +### The `repository` block + +The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. + +```hcl title=".gruntwork/repository.hcl" +repository { + deploy_branch_name = "main" +} +``` + +:::note Progress Checklist + + + + +::: + +### The `environment` block + +Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). + +For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. + +```hcl title=".gruntwork/environment-an-aws-account.hcl" +environment "an_aws_account" { + filter { + paths = ["an-aws-account/*"] + } + + authentication { + aws_oidc { + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. + +::: + +:::tip + +Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. + +::: + +:::note Progress Checklist + + + + + + + + +::: + +## Creating `.gitlab-ci.yml` + +Create a `.gitlab-ci.yml` file in the root of your `infrastructure-live` repository with the following content: + +```yaml title=".gitlab-ci.yml" +include: + - component: gitlab.com/gruntwork-io/pipelines-workflows/gitlab-ci@v4 + inputs: + stage: pipelines +``` + +:::info + +**For custom GitLab instances only**: If you are using a custom GitLab instance, you must update the component reference to point to your forked version of the pipelines-workflows project: + +```yaml title=".gitlab-ci.yml" +include: + - component: your-gitlab-instance.com/your-group/pipelines-workflows/gitlab-ci@v4 + inputs: + stage: pipelines +``` + +::: + +:::tip + +You can read the [Pipelines GitLab CI Component](https://gitlab.com/gruntwork-io/pipelines-workflows/-/blob/main/templates/gitlab-ci.yml) to learn how this GitLab CI component calls the Pipelines CLI to run your pipelines. + +::: + +:::note Progress Checklist + + + + +::: + +## Commit and push your changes + +Commit and push your changes to your repository. + + :::note + +You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow. + +::: + +```bash +git add . +git commit -m "Add Pipelines GitLab CI workflow [skip ci]" +git push +``` + +:::note Progress Checklist + + + + +::: + +🚀 You've successfully added Gruntwork Pipelines to your new repository! + +## Next steps + +You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. 
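If you'd like to verify the wiring first, a low-risk smoke test is to open a small merge request against an environment Pipelines manages and confirm that a plan gets posted. For example (a sketch of the flow; the branch name is a placeholder):

```bash
git checkout -b pipelines-smoke-test

# Make a trivial change to any Terragrunt unit under a managed environment
# (e.g. adjust a tag or an input), then commit and push it.
git add .
git commit -m "Smoke test Gruntwork Pipelines plan"
git push -u origin pipelines-smoke-test

# Open a merge request in GitLab and check the CI job output for the Terragrunt plan.
```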
From e2d137ead6fdadd74b5f3bd6edef84625401f064 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 11:02:18 -0400 Subject: [PATCH 30/39] fix: Removing unnecessary GitLab content --- .../installation/addinggitlabrepo.mdx | 103 ------------------ 1 file changed, 103 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx index 7055af090c..10b08be021 100644 --- a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx @@ -330,109 +330,6 @@ terragrunt run --all --non-interactive --provider-cache apply ::: -## Creating `.gruntwork` HCL configurations - -Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your `infrastructure-live` repository to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). - -### The `repository` block - -The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. - -```hcl title=".gruntwork/repository.hcl" -repository { - deploy_branch_name = "main" -} -``` - -:::note Progress Checklist - - - - -::: - -### The `environment` block - -Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). - -For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. - -```hcl title=".gruntwork/environment-an-aws-account.hcl" -environment "an_aws_account" { - filter { - paths = ["an-aws-account/*"] - } - - authentication { - aws_oidc { - account_id = "123456789012" - plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" - apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" - } - } -} -``` - -:::tip - -Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. - -::: - -:::tip - -Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to authenticate with AWS conveniently. 
- -::: - -:::note Progress Checklist - - - - - - - - -::: - -## Creating `.gitlab-ci.yml` - -Create a `.gitlab-ci.yml` file in the root of your `infrastructure-live` repository with the following content: - -```yaml title=".gitlab-ci.yml" -include: - - component: gitlab.com/gruntwork-io/pipelines-workflows/gitlab-ci@v4 - inputs: - stage: pipelines -``` - -:::info - -**For custom GitLab instances only**: If you are using a custom GitLab instance, you must update the component reference to point to your forked version of the pipelines-workflows project: - -```yaml title=".gitlab-ci.yml" -include: - - component: your-gitlab-instance.com/your-group/pipelines-workflows/gitlab-ci@v4 - inputs: - stage: pipelines -``` - -::: - -:::tip - -You can read the [Pipelines GitLab CI Component](https://gitlab.com/gruntwork-io/pipelines-workflows/-/blob/main/templates/gitlab-ci.yml) to learn how this GitLab CI component calls the Pipelines CLI to run your pipelines. - -::: - -:::note Progress Checklist - - - - -::: - ## Commit and push your changes Commit and push your changes to your repository. From 05d6a444e0b6be8d7d48936b71855a165ce8a5b1 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 11:24:08 -0400 Subject: [PATCH 31/39] docs: Adding existing repository instructions for GitLab --- .../pipelines/configuration/driftdetection.md | 2 +- .../docs/pipelines/configuration/settings.md | 6 +- .../installation/addingexistinggitlabrepo.mdx | 847 ++++++++++++++++++ sidebars/docs.js | 5 + 4 files changed, 855 insertions(+), 5 deletions(-) create mode 100644 docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx diff --git a/docs/2.0/docs/pipelines/configuration/driftdetection.md b/docs/2.0/docs/pipelines/configuration/driftdetection.md index f80f52b702..043ccb414b 100644 --- a/docs/2.0/docs/pipelines/configuration/driftdetection.md +++ b/docs/2.0/docs/pipelines/configuration/driftdetection.md @@ -2,4 +2,4 @@ If you are a Pipelines Enterprise customer using GitHub or GitLab and used the infrastructure-live-root repository template to install Pipelines, Drift Detection is already included and available as a workflow in your repository. -For installations not based on the template, follow the [Installing Drift Detection Guide](/2.0/docs/pipelines/guides/installing-drift-detection.md) to enable Drift Detection. +For standalone installations that did not use the `infrastructure-live-root` repository template, follow the [Installing Drift Detection Guide](/2.0/docs/pipelines/guides/installing-drift-detection.md) to enable Drift Detection. diff --git a/docs/2.0/docs/pipelines/configuration/settings.md b/docs/2.0/docs/pipelines/configuration/settings.md index 6f6627980f..9671ed755d 100644 --- a/docs/2.0/docs/pipelines/configuration/settings.md +++ b/docs/2.0/docs/pipelines/configuration/settings.md @@ -1,11 +1,9 @@ # Pipelines Configuration -[Full Pipelines Configuration Reference](/docs/2.0/reference/pipelines/configurations.md) - import PipelinesConfig from '/docs/2.0/reference/pipelines/language_auth_partial.mdx' -## Terraform & OpenTofu +## OpenTofu & Terraform -You can specify whether to invoke Terraform or OpenTofu in your Pipeline by configuring the [tf-binary](/2.0/reference/pipelines/configurations#tf-binary) setting. Define the versions of `tf-binary` and Terragrunt in the [mise.toml](/2.0/reference/pipelines/configurations#example-mise-configuration) file within your repository. 
+You can specify whether to invoke OpenTofu or Terraform with Pipelines by configuring the [tf-binary](/2.0/reference/pipelines/configurations#tf-binary) setting. Define the versions of Terragrunt and OpenTofu/Terraform used by Pipelines in the [mise.toml](/2.0/reference/pipelines/configurations#example-mise-configuration) file within your repository. diff --git a/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx new file mode 100644 index 0000000000..a1c093c2d4 --- /dev/null +++ b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx @@ -0,0 +1,847 @@ +# Bootstrap Pipelines in an Existing GitLab Project + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import PersistentCheckbox from '/src/components/PersistentCheckbox'; + +This guide provides comprehensive instructions for integrating [Gruntwork Pipelines](https://gruntwork.io/products/pipelines/) into an existing GitLab project with Infrastructure as Code (IaC). This is designed for Gruntwork customers who want to add Pipelines to their current infrastructure projects for streamlined CI/CD management. + +To configure Gruntwork Pipelines in an existing GitLab project, complete the following steps (which are explained in detail below): + +1. **Plan your Pipelines setup** by identifying all environments and cloud accounts/subscriptions you need to manage. +2. **Bootstrap core infrastructure** in accounts/subscriptions that don't already have the required OIDC and state management resources. +3. **Configure SCM access** using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs). +4. **Create `.gruntwork` HCL configurations** to tell Pipelines how to authenticate and organize your environments. +5. **Create `.gitlab-ci.yml`** to configure your GitLab CI/CD pipeline. +6. **Commit and push** your changes to activate Pipelines. + +## Prerequisites + +Before starting, ensure you have: + +- **An active Gruntwork subscription** with Pipelines access. Verify by checking the [Gruntwork Developer Portal](https://app.gruntwork.io/account) and confirming access to "pipelines" repositories in your GitHub team. +- **Cloud provider credentials** with permissions to create OIDC providers and IAM roles in accounts where Pipelines will manage infrastructure. +- **Git installed** locally for cloning and managing your project. +- **Existing IaC project** with Terragrunt configurations you want to manage with Pipelines (if you are using OpenTofu/Terraform, and want to start using Terragrunt, read the [Quickstart Guide](https://terragrunt.gruntwork.io/docs/getting-started/quick-start)). + +## Planning Your Pipelines Setup + +Before implementing Pipelines, it's crucial to plan your setup by identifying all the environments and cloud resources you need to manage. + +### Identify Your Environments + +Review your existing project structure and identify: + +1. **All environments** you want to manage with Pipelines (e.g., `dev`, `staging`, `prod`) +2. **Cloud accounts/subscriptions** associated with each environment +3. **Directory paths** in your project that contain Terragrunt units for each environment +4. 
**Existing OIDC resources** that may already be provisioned in your accounts + +:::note Progress Checklist + + + + + + +::: + +### Determine Required OIDC Roles + +For each AWS Account / Azure Subscription you want to manage, you might already have some or all of the following resources provisioned. + + + + +**Required AWS Resources:** + +- An OIDC provider for GitLab +- An IAM role for Pipelines to assume when running Terragrunt plan commands +- An IAM role for Pipelines to assume when running Terragrunt apply commands + + + + +**Required Azure Resources:** + +- Entra ID Application for plans with Federated Identity Credential +- Entra ID Application for applies with Federated Identity Credential +- Service Principals with appropriate role assignments +- Storage Account and Container for Terragrunt state storage (if not already existing) + + + + +:::note Progress Checklist + + + + +::: + +## Configuring SCM Access + +Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). + +For GitLab, you'll need to configure SCM access using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs). + +:::note Progress Checklist + + + +::: + +## Bootstrapping Cloud Infrastructure + +If your AWS accounts / Azure subscriptions don't already have all the required OIDC and state management resources, you'll need to bootstrap them. This section provides the infrastructure code needed to set up these resources. + +:::tip + +If you already have all the resources listed, you can skip this section. + +If you have some of them provisioned, but not all, you can decide to either destroy the resources you already have provisioned and recreate them or import them into state. If you are not sure, please contact [Gruntwork support](/support). + +::: + +### Prepare Your Project + +Clone your project to your local machine using [Git](https://docs.gitlab.com/user/project/repository/index.html#clone-a-repository) if you haven't already. + +:::tip + +If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads). + +::: + +For example: + +```bash +git clone git@gitlab.com:acme/infrastructure-live.git +cd infrastructure-live +``` + +:::note Progress Checklist + + + + +::: + +To bootstrap your project, we'll use Boilerplate to scaffold it with the necessary IaC code to provision the infrastructure necessary for Pipelines to function. + +The easiest way to install Boilerplate is to use `mise` to install it. + +:::tip + +If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html). + +::: + +```bash +mise use -g boilerplate@latest +``` + +:::tip + +If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions. + +```bash +mise ls-remote boilerplate +``` + +::: + +:::note Progress Checklist + + + +::: + +If you don't already have Terragrunt and OpenTofu installed locally, you can install them using `mise`: + +```bash +mise use -g terragrunt@latest opentofu@latest +``` + +:::note Progress Checklist + + + +::: + +### Cloud-specific bootstrap instructions + + + + +The resources you need provisioned in AWS to start managing resources with Pipelines are: + +1. An OpenID Connect (OIDC) provider +2. 
An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands + +For every account you want Pipelines to manage infrastructure in. + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Use Boilerplate to scaffold bootstrap configurations in your project for each AWS account +2. Use Terragrunt to provision these resources in your AWS accounts +3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +
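For orientation, here is a rough OpenTofu sketch of what those three resources amount to. This is illustrative only: the role name, project path, and trust conditions below are assumptions, and the real resources are created for you by the terragrunt-scale-catalog bootstrap templates rather than hand-written like this.

```hcl
# Illustrative sketch only. Names and conditions are assumptions; the actual
# resources come from the terragrunt-scale-catalog bootstrap templates.

resource "aws_iam_openid_connect_provider" "gitlab" {
  url            = "https://gitlab.com" # use your issuer URL for self-hosted instances
  client_id_list = ["https://gitlab.com"]
  # Depending on your AWS provider version, a thumbprint_list may also be required.
}

data "aws_iam_policy_document" "plan_trust" {
  statement {
    actions = ["sts:AssumeRoleWithWebIdentity"]

    principals {
      type        = "Federated"
      identifiers = [aws_iam_openid_connect_provider.gitlab.arn]
    }

    # Scope the role to CI jobs from your project (hypothetical project path).
    condition {
      test     = "StringLike"
      variable = "gitlab.com:sub"
      values   = ["project_path:acme/infrastructure-live:*"]
    }
  }
}

# The plan role. The apply role is analogous, typically restricted to the
# deploy branch and granted broader permissions.
resource "aws_iam_role" "pipelines_plan" {
  name               = "pipelines-plan"
  assume_role_policy = data.aws_iam_policy_document.plan_trust.json
}
```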

Bootstrap Your Project for AWS

+ +First, confirm that you have a `root.hcl` file in the root of your project that looks something like this: + +```hcl title="root.hcl" +locals { + account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) + state_bucket_name = local.account_hcl.locals.state_bucket_name + + region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) + aws_region = local.region_hcl.locals.aws_region +} + +remote_state { + backend = "s3" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + bucket = local.state_bucket_name + region = local.aws_region + key = "${path_relative_to_include()}/tofu.tfstate" + encrypt = true + use_lockfile = true + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +
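Once the scaffolding completes, your working tree should contain a small bootstrap unit per account, roughly like the sketch below. The account name `dev` is only an example, and the exact files generated depend on the template version:

```text
.
├── root.hcl
└── dev/                 # one folder per AWS account you bootstrap
    └── _global/
        └── bootstrap/   # generated by Boilerplate; you plan and apply from here
```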

Provision AWS Bootstrap Resources

+ +Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts. + +:::tip + +Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it. + +You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role. + +::: + +For each account you want to bootstrap, you'll need to run the following commands: + +First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the AWS account you want to bootstrap. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +Next, apply the changes to your account. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache apply +``` + +:::note Progress Checklist + + + + +::: + +
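If you'd like to sanity-check the bootstrap before moving on, the commands below should show the new OIDC provider and the two roles. The role names are the ones used in the examples in this guide; adjust them if you customized the bootstrap.

```bash
aws iam list-open-id-connect-providers
aws iam get-role --role-name pipelines-plan --query 'Role.Arn' --output text
aws iam get-role --role-name pipelines-apply --query 'Role.Arn' --output text
```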
+ + +The resources you need provisioned in Azure to start managing resources with Pipelines are: + +1. An Azure Resource Group for OpenTofu state resources + 1. An Azure Storage Account in that resource group for OpenTofu state storage + 1. An Azure Storage Container in that storage account for OpenTofu state storage +2. An Entra ID Application to use for plans + 1. A Flexible Federated Identity Credential for the application to authenticate with your project on any branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + 2. A role assignment for the service principal to access the Azure Storage Account +3. An Entra ID Application to use for applies + 1. A Federated Identity Credential for the application to authenticate with your project on the deploy branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Use Boilerplate to scaffold bootstrap configurations in your project for each Azure subscription +2. Use Terragrunt to provision these resources in your Azure subscription +3. Finalizing Terragrunt configurations using the bootstrap resources we just provisioned +4. Pull the bootstrap resources into state, now that we have configured a remote state backend +5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +
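To make the two federated identity credentials above concrete: they differ mainly in the GitLab `sub` claim they accept. The values below are illustrative, with an assumed project path and deploy branch; the bootstrap templates configure the real credentials for you.

```text
# plan application: flexible federated identity credential, any branch
project_path:acme/infrastructure-live:ref_type:branch:ref:*

# apply application: standard federated identity credential, deploy branch only
project_path:acme/infrastructure-live:ref_type:branch:ref:main
```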

Bootstrap Your Project for Azure

+ +For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \ + --output-folder . +``` + +:::tip + +You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitLabGroupName=acme' \ + --var 'GitLabRepoName=infrastructure-live' \ + --var 'GitLabInstanceURL=https://gitlab.com' \ + --var 'SubscriptionName=dev' \ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \ + --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \ + --var 'AzureLocation=East US' \ + --var 'StateResourceGroupName=pipelines-rg' \ + --var 'StateStorageAccountName=mysa' \ + --var 'StateStorageContainerName=tfstate' \ + --non-interactive +``` + +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + +```yaml title="vars.yml" +AccountName: dev +GitLabGroupName: acme +GitLabRepoName: infrastructure-live +GitLabInstanceURL: https://gitlab.com +SubscriptionName: dev +AzureTenantID: 00000000-0000-0000-0000-000000000000 +AzureSubscriptionID: 11111111-1111-1111-1111-111111111111 +AzureLocation: East US +StateResourceGroupName: pipelines-rg +StateStorageAccountName: my-storage-account +StateStorageContainerName: tfstate +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +::: + +:::note Progress Checklist + + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Provision Azure Bootstrap Resources

+ +Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in your Azure subscription. + +If you haven't already, you'll want to authenticate to Azure using the `az` CLI. + +```bash +az login +``` + +:::note Progress Checklist + + + +::: + + +To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: + +- `ARM_TENANT_ID` +- `ARM_SUBSCRIPTION_ID` + +For example: + +```bash +export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" +export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" +``` + +:::note Progress Checklist + + + +::: + +First, make sure that everything is set up correctly by running a plan in the subscription directory. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +:::note Progress Checklist + + + +::: + +Next, apply the changes to your subscription. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply +``` + +:::tip + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + +::: + +:::note Progress Checklist + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Finalizing Terragrunt configurations

+ +Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. + +First, edit the `root.hcl` file in the root of your project to leverage the storage account we just provisioned. + +If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this: + +```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) + + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name +} + +remote_state { + backend = "azurerm" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + resource_group_name = local.state_resource_group_name + storage_account_name = local.state_storage_account_name + container_name = local.state_storage_container_name + key = "${path_relative_to_include()}/tofu.tfstate" + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +Next, finalize the `.gruntwork/environment-.hcl` file in the root of your project to reference the IDs for the applications we just provisioned. + +You can find the values for the `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory in `name-of-subscription/bootstrap`. + +```bash +terragrunt stack output +``` + +The relevant bits that you want to extract from the stack output are the following: + +```hcl +bootstrap = { + apply_app = { + client_id = "33333333-3333-3333-3333-333333333333" + } + plan_app = { + client_id = "44444444-4444-4444-4444-444444444444" + } +} +``` + +You can use those values to set the values for `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file. + +:::note Progress Checklist + + + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

Pulling the resources into state

+ +Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +``` + +:::tip + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + +::: + +:::note Progress Checklist + + + +::: + +
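As a quick sanity check, you can confirm that the state files now live in the storage container. The account and container names below come from the example values used earlier in this guide; substitute your own.

```bash
az storage blob list \
  --account-name mysa \
  --container-name tfstate \
  --auth-mode login \
  --query '[].name' \
  --output table
```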
+
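Whichever cloud you bootstrapped, a final clean plan from the account or subscription folder is a reasonable way to confirm that the bootstrap units are fully applied and tracked in remote state before you wire up Pipelines. No changes should be reported:

```bash
terragrunt run --all --non-interactive --provider-cache plan
```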
+ +## Creating `.gruntwork` HCL Configurations + +Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your project to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s). + +### The `repository` block + +The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly. + +```hcl title=".gruntwork/repository.hcl" +repository { + deploy_branch_name = "main" +} +``` + +:::note Progress Checklist + + + + +::: + +### The `environment` block + +Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block). + +For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. + + + + +```hcl title=".gruntwork/environment-production.hcl" +environment "production" { + filter { + paths = ["prod/*"] + } + + authentication { + aws_oidc { + account_id = "123456789012" + plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan" + apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page. + +::: + +:::tip + +Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to reuse common AWS configurations. + +::: + +:::note Progress Checklist + + + + + + + +::: + + + + +```hcl title=".gruntwork/environment-production.hcl" +environment "production" { + filter { + paths = ["prod/*"] + } + + authentication { + azure_oidc { + tenant_id = "00000000-0000-0000-0000-000000000000" + subscription_id = "11111111-1111-1111-1111-111111111111" + + plan_client_id = "33333333-3333-3333-3333-333333333333" + apply_client_id = "44444444-4444-4444-4444-444444444444" + } + } +} +``` + +:::tip + +Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page. + +::: + +:::note Progress Checklist + + + + + + + + +::: + + + + +```hcl title=".gruntwork/environment-production.hcl" +environment "production" { + filter { + paths = ["prod/*"] + } + + authentication { + custom { + auth_provider_cmd = "./scripts/custom-auth-prod.sh" + } + } +} +``` + +:::tip + +Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page. 
+ +::: + +:::note Progress Checklist + + + + + + + +::: + + + + +## Creating `.gitlab-ci.yml` + +Create a `.gitlab-ci.yml` file in the root of your project with the following content: + +```yaml title=".gitlab-ci.yml" +include: + - project: 'gruntwork-io/gitlab-pipelines-workflows' + file: '/workflows/pipelines.yml' + ref: 'v1' +``` + +:::tip + +You can read the [Pipelines GitLab CI Workflow](https://gitlab.com/gruntwork-io/gitlab-pipelines-workflows) to learn how this GitLab CI pipeline calls the Pipelines CLI to run your pipelines. + +::: + +:::note Progress Checklist + + + +::: + +## Commit and Push Your Changes + +Commit and push your changes to your project. + +:::note + +You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow before everything is properly configured. + +::: + +```bash +git add . +git commit -m "Add Pipelines configurations and GitLab CI workflow [skip ci]" +git push +``` + +:::note Progress Checklist + + + + +::: + +🚀 You've successfully added Gruntwork Pipelines to your existing GitLab project! + +## Next Steps + +You have successfully completed the installation of Gruntwork Pipelines in an existing GitLab project. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes. + +## Troubleshooting Tips + +If you encounter issues during the setup process, here are some common troubleshooting steps: + +### Bootstrap Resources Failure + +If your bootstrap resource provisioning fails: + + + + + + + +### HCL Configuration Issues + +If your HCL configurations aren't working as expected: + + + + + +### GitLab CI Pipeline Issues + +If your GitLab CI pipeline isn't working as expected: + + + + + + + + + diff --git a/sidebars/docs.js b/sidebars/docs.js index e90d3dd97f..311f531900 100644 --- a/sidebars/docs.js +++ b/sidebars/docs.js @@ -283,6 +283,11 @@ const sidebar = [ type: "doc", id: "2.0/docs/pipelines/installation/addinggitlabrepo", }, + { + label: "Bootstrap Pipelines in an Existing GitLab Project", + type: "doc", + id: "2.0/docs/pipelines/installation/addingexistinggitlabrepo", + }, { label: "Adding Branch Protection to a GitLab Project", type: "doc", From ffd4df45df68ad2d1d810d7c29a30a9344d4583d Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 14:53:08 -0400 Subject: [PATCH 32/39] docs: Adding note for self-hosted GitLab instance --- .../installation/addingexistinggitlabrepo.mdx | 44 ++++++++++++++++--- .../installation/addinggitlabrepo.mdx | 42 +++++++++++++++--- 2 files changed, 75 insertions(+), 11 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx index a1c093c2d4..1ecbf9f7f6 100644 --- a/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx @@ -3,17 +3,32 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import PersistentCheckbox from '/src/components/PersistentCheckbox'; +import CustomizableValue from '/src/components/CustomizableValue'; This guide provides comprehensive instructions for integrating [Gruntwork Pipelines](https://gruntwork.io/products/pipelines/) into an existing GitLab project with Infrastructure as Code (IaC). 
This is designed for Gruntwork customers who want to add Pipelines to their current infrastructure projects for streamlined CI/CD management. To configure Gruntwork Pipelines in an existing GitLab project, complete the following steps (which are explained in detail below): -1. **Plan your Pipelines setup** by identifying all environments and cloud accounts/subscriptions you need to manage. -2. **Bootstrap core infrastructure** in accounts/subscriptions that don't already have the required OIDC and state management resources. -3. **Configure SCM access** using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs). -4. **Create `.gruntwork` HCL configurations** to tell Pipelines how to authenticate and organize your environments. -5. **Create `.gitlab-ci.yml`** to configure your GitLab CI/CD pipeline. -6. **Commit and push** your changes to activate Pipelines. +1. **(If using a self-hosted GitLab instance) Ensure OIDC configuration and JWKS are publicly accessible.** +2. **Plan your Pipelines setup** by identifying all environments and cloud accounts/subscriptions you need to manage. +3. **Bootstrap core infrastructure** in accounts/subscriptions that don't already have the required OIDC and state management resources. +4. **Configure SCM access** using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs). +5. **Create `.gruntwork` HCL configurations** to tell Pipelines how to authenticate and organize your environments. +6. **Create `.gitlab-ci.yml`** to configure your GitLab CI/CD pipeline. +7. **Commit and push** your changes to activate Pipelines. + +## Ensure OIDC configuration and JWKS are publicly accessible + +This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step. + +1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. S3 Bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWT's generated by your custom instance. +2. Note the (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps. + +:::note Progress Checklist + + + +::: ## Prerequisites @@ -268,6 +283,23 @@ boilerplate \ --non-interactive ``` +If you're using a self-hosted GitLab instance, you'll want to make sure the issuer is set correctly when calling Boilerplate. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitLabGroupName=acme' \ + --var 'GitLabRepoName=infrastructure-live' \ + --var 'GitLabInstanceURL=https://gitlab.com' \ + --var 'AWSAccountID=123456789012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-state-bucket' \ + --var 'Issuer=$$ISSUER_URL$$' \ + --non-interactive +``` + You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. 
```yaml title="vars.yml" diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx index 10b08be021..afe386192a 100644 --- a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx @@ -3,14 +3,29 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import PersistentCheckbox from '/src/components/PersistentCheckbox'; +import CustomizableValue from '/src/components/CustomizableValue'; To configure Gruntwork Pipelines in a new GitLab project, complete the following steps (which are explained in detail below): -1. Create an `infrastructure-live` project. -2. Configure machine user tokens for GitLab access, or ensure that the appropriate machine user tokens are set up as project or organization secrets. -3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. -4. Create `.gitlab-ci.yml` to tell your GitLab CI/CD pipeline how to run your pipelines. -5. Commit and push your changes to your project. +1. (If using a self-hosted GitLab instance) Ensure OIDC configuration and JWKS are publicly accessible. +2. Create an `infrastructure-live` project. +3. Configure machine user tokens for GitLab access, or ensure that the appropriate machine user tokens are set up as project or organization secrets. +4. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments. +5. Create `.gitlab-ci.yml` to tell your GitLab CI/CD pipeline how to run your pipelines. +6. Commit and push your changes to your project. + +## Ensure OIDC configuration and JWKS are publicly accessible + +This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step. + +1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. S3 Bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWT's generated by your custom instance. +2. Note the (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps. + +:::note Progress Checklist + + + +::: ## Creating the infrastructure-live project @@ -167,6 +182,23 @@ boilerplate \ --non-interactive ``` +If you're using a self-hosted GitLab instance, you'll want to make sure the issuer is set correctly when calling Boilerplate. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \ + --output-folder . 
\ + --var 'AccountName=dev' \ + --var 'GitLabGroupName=acme' \ + --var 'GitLabRepoName=infrastructure-live' \ + --var 'GitLabInstanceURL=https://gitlab.com' \ + --var 'AWSAccountID=123456789012' \ + --var 'AWSRegion=us-east-1' \ + --var 'StateBucketName=my-state-bucket' \ + --var 'Issuer=$$ISSUER_URL$$' \ + --non-interactive +``` + ::: :::note Progress Checklist From df85b9e5a7afe62ef29e5bc6ec49443bb2befa99 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 16:00:28 -0400 Subject: [PATCH 33/39] fix: Fixing URL for pipelines machine users install --- .../2.0/docs/accountfactory/architecture/security-controls.md | 2 +- docs/2.0/docs/accountfactory/installation/addingnewrepo.md | 2 +- docs/2.0/docs/pipelines/architecture/security-controls.md | 2 +- docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx | 2 +- docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md | 4 ++-- docs/2.0/docs/pipelines/installation/addingnewrepo.mdx | 2 +- docs/2.0/docs/pipelines/installation/authoverview.md | 2 +- docs/2.0/docs/pipelines/installation/viagithubapp.md | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/2.0/docs/accountfactory/architecture/security-controls.md b/docs/2.0/docs/accountfactory/architecture/security-controls.md index d41576a336..6e584f5c45 100644 --- a/docs/2.0/docs/accountfactory/architecture/security-controls.md +++ b/docs/2.0/docs/accountfactory/architecture/security-controls.md @@ -79,7 +79,7 @@ Requires the following tokens be created: - `INFRA_ROOT_WRITE_TOKEN`: Fine-grained PAT with read/write access to infrastructure repositories - `ORG_REPO_ADMIN_TOKEN`: Fine-grained PAT with admin access for repository management -See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers.md) for more details. +See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) for more details.
diff --git a/docs/2.0/docs/accountfactory/installation/addingnewrepo.md b/docs/2.0/docs/accountfactory/installation/addingnewrepo.md index 9b4414370b..bb248b70cf 100644 --- a/docs/2.0/docs/accountfactory/installation/addingnewrepo.md +++ b/docs/2.0/docs/accountfactory/installation/addingnewrepo.md @@ -23,7 +23,7 @@ Navigate to the template repository and select **Use this template** -> **Create Use the Gruntwork.io GitHub App to [add the repository as an Infra Root repository](/2.0/docs/pipelines/installation/viagithubapp#configuration). -If using the [machine user model](/2.0/docs/pipelines/installation/viamachineusers.md), ensure the `INFRA_ROOT_WRITE_TOKEN` (and `ORG_REPO_ADMIN_TOKEN` for enterprise customers) is added to the repository as a secret or configured as an organization secret. +If using the [machine user model](/2.0/docs/pipelines/installation/viamachineusers), ensure the `INFRA_ROOT_WRITE_TOKEN` (and `ORG_REPO_ADMIN_TOKEN` for enterprise customers) is added to the repository as a secret or configured as an organization secret. ## Updating the Bootstrap Workflow diff --git a/docs/2.0/docs/pipelines/architecture/security-controls.md b/docs/2.0/docs/pipelines/architecture/security-controls.md index 6b88e281b8..1c70edec13 100644 --- a/docs/2.0/docs/pipelines/architecture/security-controls.md +++ b/docs/2.0/docs/pipelines/architecture/security-controls.md @@ -47,7 +47,7 @@ Requires that the following tokens are created: - `INFRA_ROOT_WRITE_TOKEN`: Fine-grained PAT with read/write access to infrastructure repositories - `ORG_REPO_ADMIN_TOKEN`: Fine-grained PAT with admin access for repository management -See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers.md) for more details. +See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) for more details. diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx index 5b25a87270..c23b092f36 100644 --- a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx @@ -86,7 +86,7 @@ Pipelines needs the ability to interact with Source Control Management (SCM) pla There are two ways to configure SCM access for Pipelines: 1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). -2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitHub users who cannot use the GitHub App). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers) (recommended for GitHub users who cannot use the GitHub App). 
:::note Progress Checklist diff --git a/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md b/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md index 024a2c5183..14a30f97ff 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md +++ b/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md @@ -13,8 +13,8 @@ To use Gruntwork Pipelines in an **existing** GitLab repository, see this [guide Before you begin, make sure you have: - Basic familiarity with Git, GitLab, and infrastructure as code concepts -- Completed the [AWS Landing Zone setup](/2.0/docs/pipelines/installation/prerequisites/awslandingzone) -- Have programmatic access to the AWS accounts created in the [AWS Landing Zone setup](/2.0/docs/pipelines/installation/prerequisites/awslandingzone) +- Completed the [AWS Landing Zone setup](/2.0/docs/accountfactory/prerequisites/awslandingzone) +- Have programmatic access to the AWS accounts created in the [AWS Landing Zone setup](/2.0/docs/accountfactory/prerequisites/awslandingzone) - Completed the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab) and setup a machine user with appropriate PAT tokens - Local access to Gruntwork's GitHub repositories, specifically the [architecture catalog](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/) diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx index 3af96f9b28..aff5927f5d 100644 --- a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx @@ -23,7 +23,7 @@ Pipelines needs the ability to interact with Source Control Management (SCM) pla There are two ways to configure SCM access for Pipelines: 1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users). -2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers.md) (recommended for GitHub users who cannot use the GitHub App). +2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers) (recommended for GitHub users who cannot use the GitHub App). :::note Progress Checklist diff --git a/docs/2.0/docs/pipelines/installation/authoverview.md b/docs/2.0/docs/pipelines/installation/authoverview.md index 18706bca67..b9023e00bc 100644 --- a/docs/2.0/docs/pipelines/installation/authoverview.md +++ b/docs/2.0/docs/pipelines/installation/authoverview.md @@ -12,7 +12,7 @@ Gruntwork Pipelines requires authentication with Source Control Management (SCM) Gruntwork provides two authentication methods: - [The Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md) -- [CI Users (Machine Users)](/2.0/docs/pipelines/installation/viamachineusers.md) +- [CI Users (Machine Users)](/2.0/docs/pipelines/installation/viamachineusers) Both approaches support the core functionality of Pipelines. The GitHub App provides additional features and benefits, making it the recommended method for most customers that can use it. While Gruntwork strives to ensure feature parity between the two authentication mechanisms, certain advanced features are exclusive to the GitHub App, and this list is expected to grow over time. 
diff --git a/docs/2.0/docs/pipelines/installation/viagithubapp.md b/docs/2.0/docs/pipelines/installation/viagithubapp.md index a3e87289f6..501f83dfcc 100644 --- a/docs/2.0/docs/pipelines/installation/viagithubapp.md +++ b/docs/2.0/docs/pipelines/installation/viagithubapp.md @@ -132,7 +132,7 @@ The following features of the Gruntwork.io GitHub App will be unavailable during ### Fallback -In order to ensure that the availability of the Gruntwork.io GitHub App is not something that can impair the ability of users to drive infrastructure updates, users can also authenticate with GitHub using [Machine users](/2.0/docs/pipelines/installation/viamachineusers.md). +In order to ensure that the availability of the Gruntwork.io GitHub App is not something that can impair the ability of users to drive infrastructure updates, users can also authenticate with GitHub using [Machine users](/2.0/docs/pipelines/installation/viamachineusers). Configuring the `PIPELINES_READ_TOKEN`, `INFRA_ROOT_WRITE_TOKEN` and `ORG_REPO_ADMIN_TOKEN` where necessary (following the documentation linked above) will result in Pipelines using the machine users mechanism to authenticate with GitHub, rather than the Gruntwork.io GitHub App. From ab11b0d45ef2ca8355c6d0e2dfc8de838f76817e Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 16:47:29 -0400 Subject: [PATCH 34/39] fix: Satisfying spellcheck --- custom-dictionary.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/custom-dictionary.txt b/custom-dictionary.txt index 0b7ec3f155..7911f890ff 100644 --- a/custom-dictionary.txt +++ b/custom-dictionary.txt @@ -63,3 +63,6 @@ self-hosting infrachanges Entra GLMU +myprodsa +azuread +mysa From 6d0cb39251e79dba9cf20389fa33acf2766483d7 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 16:52:27 -0400 Subject: [PATCH 35/39] fix: Fixing auth links --- docs/2.0/docs/pipelines/concepts/cloud-auth/index.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/2.0/docs/pipelines/concepts/cloud-auth/index.md b/docs/2.0/docs/pipelines/concepts/cloud-auth/index.md index b016790dc5..aa91b06be3 100644 --- a/docs/2.0/docs/pipelines/concepts/cloud-auth/index.md +++ b/docs/2.0/docs/pipelines/concepts/cloud-auth/index.md @@ -17,9 +17,9 @@ Cloud authentication in Pipelines is built on the principle of least privilege a Currently, Pipelines supports authentication to the following cloud providers: -- [AWS](./aws.mdx) - AWS authentication using OIDC -- [Azure](./azure.md) - Azure authentication using OIDC -- [Custom](./custom.md) - Custom authentication you can implement yourself +- [AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) - AWS authentication using OIDC +- [Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) - Azure authentication using OIDC +- [Custom](/2.0/docs/pipelines/concepts/cloud-auth/custom) - Custom authentication you can implement yourself ## Security Best Practices From 10a54e9339ad35598f788786e949c87aa97e6701 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Mon, 6 Oct 2025 20:49:58 -0400 Subject: [PATCH 36/39] fix: Addressing easy to address PR feedback --- .../installation/addingexistinggitlabrepo.mdx | 26 ++++++++++++++----- .../installation/addingexistingrepo.mdx | 22 ++++++++++++---- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx 
b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx index 1ecbf9f7f6..5ac66ca6dd 100644 --- a/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx @@ -65,7 +65,7 @@ Review your existing project structure and identify: For each AWS Account / Azure Subscription you want to manage, you might already have some or all of the following resources provisioned. - + **Required AWS Resources:** @@ -96,9 +96,9 @@ For each AWS Account / Azure Subscription you want to manage, you might already ## Configuring SCM Access -Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). +Pipelines needs the ability to interact with GitLab to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself). -For GitLab, you'll need to configure SCM access using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs). +To create machine users for GitLab access, follow our [machine users guide](/2.0/docs/pipelines/installation/viamachineusers) to set up the appropriate Personal Access Tokens (PATs) with the required permissions. :::note Progress Checklist @@ -186,7 +186,7 @@ mise use -g terragrunt@latest opentofu@latest ### Cloud-specific bootstrap instructions - + The resources you need provisioned in AWS to start managing resources with Pipelines are: @@ -253,7 +253,7 @@ EOF If you don't have a `root.hcl` file, you might need to customize the bootstrapping process, as the Terragrunt scale catalog expects a `root.hcl` file in the root of the project. Please contact [Gruntwork support](/support) for assistance if you need help. -For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. +For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each account: ```bash boilerplate \ @@ -263,6 +263,12 @@ boilerplate \ :::tip +You'll need to run this boilerplate command once for each AWS account you want to manage with Pipelines. Boilerplate will prompt you for account-specific values each time. + +::: + +:::tip + You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. @@ -405,7 +411,7 @@ The process that we'll follow to get these resources ready for Pipelines is: {/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}

Bootstrap Your Project for Azure

-For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. +For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each subscription: ```bash boilerplate \ @@ -415,6 +421,12 @@ boilerplate \ :::tip +You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time. + +::: + +:::tip + You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. @@ -676,7 +688,7 @@ Next, you'll want to define the environments you want to manage with Pipelines u For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. - + ```hcl title=".gruntwork/environment-production.hcl" diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx index c23b092f36..94b88343c9 100644 --- a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx +++ b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx @@ -50,7 +50,7 @@ Review your existing repository structure and identify: For each AWS Account / Azure Subscription you want to manage, you might already have some or all of the following resources provisioned. - + **Required AWS Resources:** @@ -174,7 +174,7 @@ mise use -g terragrunt@latest opentofu@latest ### Cloud-specific bootstrap instructions - + The resources you need provisioned in AWS to start managing resources with Pipelines are: @@ -241,7 +241,7 @@ EOF If you don't have a `root.hcl` file, you might need to customize the bootstrapping process, as the Terragrunt scale catalog expects a `root.hcl` file in the root of the repository. Please contact [Gruntwork support](/support) for assistance if you need help. -For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. +For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your repository for each account: ```bash boilerplate \ @@ -251,6 +251,12 @@ boilerplate \ :::tip +You'll need to run this boilerplate command once for each AWS account you want to manage with Pipelines. Boilerplate will prompt you for account-specific values each time. + +::: + +:::tip + You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. @@ -374,7 +380,7 @@ The process that we'll follow to get these resources ready for Pipelines is: {/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}

Bootstrap Your Repository for Azure

-For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. +For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your repository for each subscription: ```bash boilerplate \ @@ -384,6 +390,12 @@ boilerplate \ :::tip +You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time. + +::: + +:::tip + You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. @@ -643,7 +655,7 @@ Next, you'll want to define the environments you want to manage with Pipelines u For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment. - + ```hcl title=".gruntwork/environment-production.hcl" From e2af1bd717d1125e7249065312109c4d54481532 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:12:29 -0400 Subject: [PATCH 37/39] fix: Removing `addingnewgitlabrepo.md` --- .../docs/overview/getting-started/index.md | 2 +- .../installation/addingnewgitlabrepo.md | 547 ------------------ 2 files changed, 1 insertion(+), 548 deletions(-) delete mode 100644 docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md diff --git a/docs/2.0/docs/overview/getting-started/index.md b/docs/2.0/docs/overview/getting-started/index.md index 4d2a3fb34e..b1e7af9238 100644 --- a/docs/2.0/docs/overview/getting-started/index.md +++ b/docs/2.0/docs/overview/getting-started/index.md @@ -22,7 +22,7 @@ Set up authentication for Pipelines to enable secure automation of infrastructur ### Step 4: Create new Pipelines repositories - [New GitHub repository](/2.0/docs/pipelines/installation/addingnewrepo) -- [New GitLab repository](/2.0/docs/pipelines/installation/addingnewgitlabrepo) +- [New GitLab repository](/2.0/docs/pipelines/installation/addinggitlabrepo) Alternatively, you can add Pipelines to an existing repository: diff --git a/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md b/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md deleted file mode 100644 index 14a30f97ff..0000000000 --- a/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md +++ /dev/null @@ -1,547 +0,0 @@ -import CustomizableValue from '/src/components/CustomizableValue'; - -# Creating a New GitLab Project with Pipelines - -This guide walks you through the process of setting up a new GitLab Project with the Gruntwork Platform. By the end, you'll have a fully configured GitLab CI/CD pipeline that can create new AWS accounts and deploy infrastructure changes automatically. - -:::info -To use Gruntwork Pipelines in an **existing** GitLab repository, see this [guide](/2.0/docs/pipelines/installation/addinggitlabrepo). 
-::: - -## Prerequisites - -Before you begin, make sure you have: - -- Basic familiarity with Git, GitLab, and infrastructure as code concepts -- Completed the [AWS Landing Zone setup](/2.0/docs/accountfactory/prerequisites/awslandingzone) -- Have programmatic access to the AWS accounts created in the [AWS Landing Zone setup](/2.0/docs/accountfactory/prerequisites/awslandingzone) -- Completed the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab) and setup a machine user with appropriate PAT tokens -- Local access to Gruntwork's GitHub repositories, specifically the [architecture catalog](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/) - -
-Additional setup for **custom GitLab instances only** - -### Fork the Pipelines workflow project - -You must [fork](https://docs.gitlab.com/user/project/repository/forking_workflow/#create-a-fork) Gruntwork's public [Pipelines workflow project](https://gitlab.com/gruntwork-io/pipelines-workflows) into your own GitLab instance. -This is necessary because Gruntwork Pipelines uses [GitLab CI/CD components](/2.0/docs/pipelines/architecture/ci-workflows), and GitLab requires components to reside within the [same GitLab instance as the project referencing them](https://docs.gitlab.com/ci/components/#use-a-component). - -When creating the fork, we recommend configuring it as a public mirror of the original Gruntwork project and ensuring that tags are included. - -### Ensure OIDC configuration and JWKS are publicly accessible - -This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step. - -1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. S3 Bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWT's generated by your custom instance. -2. Note the (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps. -
- -1. Create a new GitLab project for your `infrastructure-live-root` repository. -1. Install dependencies. -1. Configure the variables required to run the infrastructure-live-root boilerplate template. -1. Create your `infrastructure-live-root` repository contents using Gruntwork's architecture-catalog template. -1. Apply the account baselines to your AWS accounts. - - -## Create a new infrastructure-live-root - -### Authorize Your GitLab Group with Gruntwork - -To use Gruntwork Pipelines with GitLab, your group needs authorization from Gruntwork. Email your Gruntwork account manager or support@gruntwork.io with: - - ``` - GitLab group name(s): $$GITLAB_GROUP_NAME$$ (e.g. acme-io) - GitLab Issuer URL: $$ISSUER_URL$$ (For most users this is the URL of your GitLab instance e.g. https://gitlab.acme.io, if your instance is not publicly accessible, this should be a separate URL that is publicly accessible per step 0, e.g. https://s3.amazonaws.com/YOUR_BUCKET_NAME/) - Organization name: $$ORGANIZATION_NAME$$ (e.g. Acme, Inc.) - ``` - -Continue with the rest of the guide while you await confirmation when your group has been authorized. - -### Create a new GitLab project - -1. Navigate to the group. -1. Click the **New Project** button. -1. Enter a name for the project. e.g. infrastructure-live-root -1. Click **Create Project**. -1. Clone the project to your local machine. -1. Navigate to the project directory. -1. Create a new branch `bootstrap-repository`. - -### Install dependencies - -1. Install [mise](https://mise.jdx.dev/getting-started.html) on your machine. -1. Activate mise in your shell: - - ```bash - # For Bash - echo 'eval "$(~/.local/bin/mise activate bash)"' >> ~/.bashrc - - # For Zsh - echo 'eval "$(~/.local/bin/mise activate zsh)"' >> ~/.zshrc - - # For Fish - echo 'mise activate fish | source' >> ~/.config/fish/config.fish - ``` - -1. Add the following to a .mise.toml file in the root of your project: - - ```toml title=".mise.toml" - [tools] - boilerplate = "0.8.1" - opentofu = "1.10.0" - terragrunt = "0.81.6" - awscli = "latest" - ``` - -1. Run `mise install`. - - -### Bootstrap the repository - -Gruntwork provides a boilerplate [template](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/tree/main/templates/devops-foundations-infrastructure-live-root) that incorporates best practices while allowing for customization. The template is designed to scaffold a best-practices Terragrunt configurations. It includes patterns for module defaults, global variables, and account baselines. Additionally, it integrates Gruntwork Pipelines. - -#### Configure the variables required to run the boilerplate template - -Copy the content below to a `vars.yaml` file in the root of your project and update the `` values with your own. - -```yaml title="vars.yaml" -SCMProvider: GitLab - -# The GitLab group to use for the infrastructure repositories. This should include any additional sub-groups in the name -# Example: acme/prod -SCMProviderGroup: $$GITLAB_GROUP_NAME$$ - -# The GitLab project to use for the infrastructure-live repository. -SCMProviderRepo: infrastructure-live-root - -# The base URL of your GitLab group repos. E.g., gitlab.com/ -RepoBaseUrl: $$GITLAB_GROUP_REPO_BASE_URL$$ - -# The name of the branch to deploy to. 
-# Example: main -DeployBranchName: $$DEPLOY_BRANCH_NAME$$ - -# The AWS account ID for the management account -# Example: "123456789012" -AwsManagementAccountId: $$AWS_MANAGEMENT_ACCOUNT_ID$$ - -# The AWS account ID for the security account -# Example: "123456789013" -AwsSecurityAccountId: $$AWS_SECURITY_ACCOUNT_ID$$ - -# The AWS account ID for the logs account -# Example: "123456789014" -AwsLogsAccountId: $$AWS_LOGS_ACCOUNT_ID$$ - -# The AWS account ID for the shared account -# Example: "123456789015" -AwsSharedAccountId: $$AWS_SHARED_ACCOUNT_ID$$ - -# The AWS account Email for the logs account -# Example: logs@acme.com -AwsLogsAccountEmail: $$AWS_LOGS_ACCOUNT_EMAIL$$ - -# The AWS account Email for the management account -# Example: management@acme.com -AwsManagementAccountEmail: $$AWS_MANAGEMENT_ACCOUNT_EMAIL$$ - -# The AWS account Email for the security account -# Example: security@acme.com -AwsSecurityAccountEmail: $$AWS_SECURITY_ACCOUNT_EMAIL$$ - -# The AWS account Email for the shared account -# Example: shared@acme.com -AwsSharedAccountEmail: $$AWS_SHARED_ACCOUNT_EMAIL$$ - -# The name prefix to use for creating resources e.g S3 bucket for OpenTofu state files -# Example: acme -OrgNamePrefix: $$ORG_NAME_PREFIX$$ - -# The default region for AWS Resources -# Example: us-east-1 -DefaultRegion: $$DEFAULT_REGION$$ - -################################################################################ -# OPTIONAL VARIABLES WITH THEIR DEFAULT VALUES. UNCOMMENT AND MODIFY IF NEEDED. -################################################################################ - -# List of the git repositories to populate for the catalog -# CatalogRepositories: -# - github.com/gruntwork-io/terraform-aws-service-catalog - -# The AWS partition to use. Options: aws, aws-us-gov -# AWSPartition: aws - -# The name of the IAM role to use for the plan job. -# PlanIAMRoleName: root-pipelines-plan - -# The name of the IAM role to use for the apply job. -# ApplyIAMRoleName: root-pipelines-apply - -# The default tags to apply to all resources. -# DefaultTags: -# "{{ .OrgNamePrefix }}:Team": "DevOps" - -# The version for terraform-aws-security module to use for OIDC provider and roles provisioning -# SecurityModulesVersion: v0.75.18 - -# The URL of the custom SCM provider instance. Set this if you are using a custom instance of GitLab. -# CustomSCMProviderInstanceURL: https://gitlab.example.io - -# The relative path from the host server to the custom pipelines workflow repository. Set this if you are using a custom/forked instance of the pipelines workflow. -# CustomWorkflowHostRelativePath: pipelines-workflows -``` - -#### Generate the repository contents - -1. Run the following command, from the root of your project, to generate the `infrastructure-live-root` repository contents: - - - ```bash - boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/devops-foundations-infrastructure-live-root/?ref=main" --output-folder . --var-file vars.yaml --non-interactive - ``` - - This command adds all code required to set up your `infrastructure-live-root` repository. -1. Remove the boilerplate dependency from the `mise.toml` file. It is no longer needed. - -1. Commit your local changes and push them to the `bootstrap-repository` branch. - - ```bash - git add . 
- git commit -m "Bootstrap infrastructure-live-root repository initial commit [skip ci]" - git push origin bootstrap-repository - ``` - - Skipping the CI/CD process for now; you will manually apply the infrastructure baselines to your AWS accounts in a later step. - -1. Create a new merge request for the `bootstrap-repository` branch. Review the changes to understand what will be applied to your AWS accounts. The generated files fall under the following categories: - - - GitLab Pipelines workflow file - - Gruntwork Pipelines configuration files - - Module defaults files for infrastructure code - - Account baselines and GitLab OIDC module scaffolding files for your core AWS accounts: management, security, logs and shared. - -### Apply the account baselines to your AWS accounts - -You will manually `terragrunt apply` the generated infrastructure baselines to get your accounts bootstrapped **before** merging this content into your main branch. - -:::tip -You can utilize the AWS SSO Portal to obtain temporary AWS credentials necessary for subsequent steps: - -1. Sign in to the Portal page and select your preferred account to unveil the roles accessible to your SSO user. -1. Navigate to the "Access keys" tab adjacent to the "AWSAdministratorAccess" role. -1. Copy the "AWS environment variables" provided and paste them into your terminal for usage. -::: - - -1. [ ] Apply infrastructure changes in the **management** account - - 1. - [ ] Obtain AWS CLI Administrator credentials for the management account - - 1. - [ ] Navigate to the management account folder - - ```bash - cd management/ - ``` - - 1. - [ ] Using your credentials, run `terragrunt plan`. - - ```bash - terragrunt run --all plan --terragrunt-non-interactive - ``` - - 1. - [ ] After the plan succeeds, apply the changes: - - ```bash - terragrunt run --all apply --terragrunt-non-interactive - ``` - - 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. The lock files will be committed in the final step of the setup. e.g. - - ```bash - terragrunt run --all providers -- lock -platform=darwin_amd64 -platform=linux_amd64 - ``` - - 1. - [ ] Update Permissions for Account Factory Portfolio - - The account factory pipeline _will fail_ until you grant the pipelines roles (`root-pipelines-plan` and `root-pipelines-apply`) access to the portfolio. This step **must be done after** you provision the pipelines roles in the management account (where control tower is set up). - - Access to the portfolio is separate from IAM access, it **must** be granted in the Service Catalog console. - - #### **Steps to grant access** - - To grant access to the Account Factory Portfolio, you **must** be an individual with Service Catalog administrative permissions. - - 1. Log into the management AWS account - 1. Go into the Service Catalog console - 1. Ensure you are in your default region(control-tower region) - 1. Select the **Portfolios** option in **Administration** from the left side navigation panel - 1. Click on the portfolio named **AWS Control Tower Account Factory Portfolio** - 1. Select the **Access** tab - 1. Click the **Grant access** button - 1. In the **Access type** section, leave the default value of **IAM Principal** - 1. Select the **Roles** tab in the lower section - 1. Enter `root-pipelines` into the search bar, there should be two results (`root-pipelines-plan` and `root-pipelines-apply`). Click the checkbox to the left of each role name. - 1. 
Click the **Grant access** button in the lower right hand corner - - 1. - [ ] Increase Account Quota Limit (OPTIONAL) - - Note that DevOps Foundations makes it very convenient, and therefore likely, that you will encounter one of the soft limits imposed by AWS on the number of accounts you can create. - - You may need to request a limit increase for the number of accounts you can create in the management account, as the default is currently 10 accounts. - - To request an increase to this limit, search for "Organizations" in the AWS management console [here](https://console.aws.amazon.com/servicequotas/home/dashboard) and request a limit increase to a value that makes sense for your organization. - -1. - [ ] Apply infrastructure changes in the **logs** account - - 1. - [ ] Obtain AWS CLI Administrator credentials for the logs account - 1. - [ ] Navigate to the logs account folder - - ```bash - cd ../logs/ - ``` - - 1. - [ ] Using your credentials, run `terragrunt plan`. - - ```bash - terragrunt run --all plan --terragrunt-non-interactive - ``` - - 1. - [ ] After the plan succeeds, apply the changes: - - ```bash - terragrunt run --all apply --terragrunt-non-interactive - ``` - - 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. e.g. - - ```bash - terragrunt run --all providers lock -platform=darwin_amd64 -platform=linux_amd64 - ``` - -1. - [ ] Apply infrastructure changes in the **security** account - - 1. - [ ] Obtain AWS CLI Administrator credentials for the security account - 1. - [ ] Navigate to the security account folder - - ```bash - cd ../security/ - ``` - - 1. - [ ] Using your credentials, run `terragrunt plan`. - - ```bash - terragrunt run --all plan --terragrunt-non-interactive - ``` - - 1. - [ ] After the plan succeeds, apply the changes: - - ```bash - terragrunt run --all apply --terragrunt-non-interactive - ``` - - 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. e.g. - - ```bash - terragrunt run --all providers lock -platform=darwin_amd64 -platform=linux_amd64 - ``` - -1. - [ ] Apply infrastructure changes in the **shared** account - - 1. - [ ] Obtain AWS CLI Administrator credentials for the shared account. You may need to grant your user access to the `AWSAdministratorAccess` permission set in the shared account from the management account's Identity Center Admin console. - 1. - [ ] Using your credentials, create a service role - - ```bash - aws iam create-service-linked-role --aws-service-name autoscaling.amazonaws.com - ``` - - 1. - [ ] Navigate to the shared account folder - - ```bash - cd ../shared/ - ``` - - 1. - [ ] Using your credentials, run `terragrunt plan`. - - ```bash - terragrunt run --all plan --terragrunt-non-interactive - ``` - - 1. - [ ] After the plan succeeds, apply the changes: - - ```bash - terragrunt run --all apply --terragrunt-non-interactive - ``` - - 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. e.g. - - ```bash - terragrunt run --all providers lock -platform=darwin_amd64 -platform=linux_amd64 - ``` - -1. - [ ] Commit your local changes and push them to the `bootstrap-repository` branch. - - ```bash - cd .. - git add . - git commit -m "Bootstrap infrastructure-live-root repository final commit [skip ci]" - git push origin bootstrap-repository - ``` - -1. - [ ] Merge the open merge request. 
**Ensure [skip ci] is present in the commit message.** - - -## Create a new infrastructure-live-access-control (optional) - -### Create a new GitLab project - -1. Navigate to the group. -1. Click the **New Project** button. -1. Enter the name for the project as `infrastructure-live-access-control`. -1. Click **Create Project**. -1. Clone the project to your local machine. -1. Navigate to the project directory. -1. Create a new branch `bootstrap-repository`. - -### Install dependencies - -Run `mise install boilerplate@0.8.1` to install the boilerplate tool. - -### Bootstrap the repository - -#### Configure the variables required to run the boilerplate template - -Copy the content below to a `vars.yaml` file in the root of your project and update the customizable values as needed. - -```yaml title="vars.yaml" -SCMProvider: GitLab - -# The GitLab group to use for the infrastructure repositories. This should include any additional sub-groups in the name -# Example: acme/prod -SCMProviderGroup: $$GITLAB_GROUP_NAME$$ - -# The GitLab project to use for the infrastructure-live repository. -SCMProviderRepo: infrastructure-live-access-control - -# The name of the branch to deploy to. -# Example: main -DeployBranchName: $$DEPLOY_BRANCH_NAME$$ - -# The name prefix to use for creating resources e.g S3 bucket for OpenTofu state files -# Example: acme -OrgNamePrefix: $$ORG_NAME_PREFIX$$ - -# The default region for AWS Resources -# Example: us-east-1 -DefaultRegion: $$DEFAULT_REGION$$ - -################################################################################ -# OPTIONAL VARIABLES WITH THEIR DEFAULT VALUES. UNCOMMENT AND MODIFY IF NEEDED. -################################################################################ - -# The AWS partition to use. -# AWSPartition: aws -``` - -#### Generate the repository contents - -1. Run the following command, from the root of your project, to generate the `infrastructure-live-access-control` repository contents: - - - ```bash - boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/devops-foundations-infrastructure-live-access-control/?ref=main" --output-folder . --var-file vars.yaml --non-interactive - ``` - - This command adds all code required to set up your `infrastructure-live-access-control` repository. The generated files fall under the following categories: - - - GitLab Pipelines workflow file - - Gruntwork Pipelines configuration files - - Module defaults files for GitLab OIDC roles and policies - - -2. Commit your local changes and push them to the `bootstrap-repository` branch. - - ```bash - git add . - git commit -m "Bootstrap infrastructure-live-access-control repository [skip ci]" - git push origin bootstrap-repository - ``` - - Skipping the CI/CD process now because there is no infrastructure to apply; repository simply contains the GitLab OIDC role module defaults to enable GitLab OIDC authentication from repositories other than `infrastructure-live-root`. - -3. Create a new merge request for the `bootstrap-repository` branch. Review the changes to understand the GitLab OIDC role module defaults. -4. Merge the open merge request. **Ensure [skip ci] is present in the commit message.** - -## Create a new infrastructure-catalog (optional) - -The `infrastructure-catalog` repository is a collection of modules that can be used to build your infrastructure. It is a great way to share modules with your team and across your organization. 
Learn more about the [Developer Self-Service](/2.0/docs/overview/concepts/developer-self-service) concept. - -### Create a new GitLab project - -1. Navigate to the group. -1. Click the **New Project** button. -1. Enter the name for the project as `infrastructure-catalog`. -1. Click **Create Project**. -1. Clone the project to your local machine. -1. Navigate to the project directory. -1. Create a new branch `bootstrap-repository`. - -### Install dependencies - -Run `mise install boilerplate@0.8.1` to install the boilerplate tool. - -### Bootstrap the repository - -#### Configure the variables required to run the boilerplate template - -Copy the content below to a `vars.yaml` file in the root of your project and update the customizable values as needed. - -```yaml title="vars.yaml" -# The name of the repository to use for the catalog. -InfraModulesRepoName: infrastructure-catalog - -# The version of the Gruntwork Service Catalog to use. https://github.com/gruntwork-io/terraform-aws-service-catalog -ServiceCatalogVersion: v0.111.2 - -# The version of the Gruntwork VPC module to use. https://github.com/gruntwork-io/terraform-aws-vpc -VpcVersion: v0.26.22 - -# The default region for AWS Resources -# Example: us-east-1 -DefaultRegion: $$DEFAULT_REGION$$ - -################################################################################ -# OPTIONAL VARIABLES WITH THEIR DEFAULT VALUES. UNCOMMENT AND MODIFY IF NEEDED. -################################################################################ - -# The base URL of the Organization to use for the catalog. -# If you are using Gruntwork's RepoCopier tool, this should be the base URL of the repository you are copying from. -# RepoBaseUrl: github.com/gruntwork-io - -# The name prefix to use for the Gruntwork RepoCopier copied repositories. -# Example: gruntwork-io- -# GWCopiedReposNamePrefix: -``` - - -#### Generate the repository contents - -1. Run the following command, from the root of your project, to generate the `infrastructure-catalog` repository contents: - - - ```bash - boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/devops-foundations-infrastructure-modules/?ref=main" --output-folder . --var-file vars.yaml --non-interactive - ``` - - This command adds some code required to set up your `infrastructure-catalog` repository. The generated files are some usable modules for your infrastructure. - -1. Commit your local changes and push them to the `bootstrap-repository` branch. - - ```bash - git add . - git commit -m "Bootstrap infrastructure-catalog repository" - git push origin bootstrap-repository - ``` - -1. Create a new merge request for the `bootstrap-repository` branch. Review the changes to understand the example Service Catalog modules. -1. Merge the open merge request. 
From c0dbd94fa1e27875af1ccc0b1d2b9114ee5002d3 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:31:21 -0400 Subject: [PATCH 38/39] fix: Rename `getting-started` `index.md` to `index.mdx` --- .../docs/overview/getting-started/{index.md => index.mdx} | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) rename docs/2.0/docs/overview/getting-started/{index.md => index.mdx} (96%) diff --git a/docs/2.0/docs/overview/getting-started/index.md b/docs/2.0/docs/overview/getting-started/index.mdx similarity index 96% rename from docs/2.0/docs/overview/getting-started/index.md rename to docs/2.0/docs/overview/getting-started/index.mdx index b1e7af9238..9d5c306b74 100644 --- a/docs/2.0/docs/overview/getting-started/index.md +++ b/docs/2.0/docs/overview/getting-started/index.mdx @@ -1,6 +1,7 @@ -# Setting up DevOps Foundations & Components import PersistentCheckbox from '/src/components/PersistentCheckbox'; +# Setting up DevOps Foundations & Components + ### Step 1: [Activate your Gruntwork account](/2.0/docs/overview/getting-started/create-account) Create your Gruntwork account and invite your team members to access Gruntwork resources. @@ -40,7 +41,8 @@ During the Pipelines setup process, configure Gruntwork Account Factory for AWS ### Step 6: Start using DevOps Foundations You're all set! You can now: + - [Build with the Gruntwork IaC Library](/2.0/docs/library/tutorials/deploying-your-first-gruntwork-module) -- Automatically [plan and apply IaC changes with Pipelines](/2.0/docs/pipelines/guides/running-plan-apply) +- [Automatically plan and apply IaC changes with Pipelines](/2.0/docs/pipelines/guides/running-plan-apply) - [Vend new AWS accounts with Account Factory](/2.0/docs/accountfactory/guides/vend-aws-account) - [Keep your infrastructure up to date with Patcher](/2.0/docs/patcher/concepts/) From 09b604c1deca93d70fd79b206ac7430d9958e6c5 Mon Sep 17 00:00:00 2001 From: Yousif Akbar <11247449+yhakbar@users.noreply.github.com> Date: Tue, 7 Oct 2025 13:06:30 -0400 Subject: [PATCH 39/39] chore: Pushing up failed attempt for context --- src/components/.d.ts | 4 + .../pipelines/CloudSpecificBootstra.tsx | 487 +++++++++++++++++ .../pipelines/CloudSpecificBootstrap.tsx | 498 ++++++++++++++++++ .../CloudSpecificBootstrap.tsx-attempt-1 | 498 ++++++++++++++++++ .../pipelines/snippets/aws-root.hcl | 32 ++ .../snippets/azure-bootstrap-output.hcl | 8 + .../pipelines/snippets/azure-root.hcl | 35 ++ 7 files changed, 1562 insertions(+) create mode 100644 src/components/pipelines/CloudSpecificBootstra.tsx create mode 100644 src/components/pipelines/CloudSpecificBootstrap.tsx create mode 100644 src/components/pipelines/CloudSpecificBootstrap.tsx-attempt-1 create mode 100644 src/components/pipelines/snippets/aws-root.hcl create mode 100644 src/components/pipelines/snippets/azure-bootstrap-output.hcl create mode 100644 src/components/pipelines/snippets/azure-root.hcl diff --git a/src/components/.d.ts b/src/components/.d.ts index 2ab174fdbc..90055ebcde 100644 --- a/src/components/.d.ts +++ b/src/components/.d.ts @@ -1 +1,5 @@ declare module "*.module.css" +declare module "!!raw-loader!*" { + const content: string; + export default content; +} diff --git a/src/components/pipelines/CloudSpecificBootstra.tsx b/src/components/pipelines/CloudSpecificBootstra.tsx new file mode 100644 index 0000000000..b8f6e69005 --- /dev/null +++ b/src/components/pipelines/CloudSpecificBootstra.tsx @@ -0,0 +1,487 @@ +import React from "react"; +import Tabs from 
'@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import Admonition from '@theme/Admonition'; +import CodeBlock from '@theme/CodeBlock'; +import PersistentCheckbox from '../PersistentCheckbox'; + +export const CloudSpecificBootstrap = () => { + return ( + <> + + + +The resources you need provisioned in AWS to start managing resources with Pipelines are: + +1. An OpenID Connect (OIDC) provider +2. An IAM role for Pipelines to assume when running Terragrunt plan commands +3. An IAM role for Pipelines to assume when running Terragrunt apply commands + +For every account you want Pipelines to manage infrastructure in. + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Use Boilerplate to scaffold bootstrap configurations in your project for each AWS account +2. Use Terragrunt to provision these resources in your AWS accounts +3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>Bootstrap Your Project for AWS</h3>

+ +First, confirm that you have a `root.hcl` file in the root of your project that looks something like this: + +```hcl title="root.hcl" +locals { + account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) + state_bucket_name = local.account_hcl.locals.state_bucket_name + + region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) + aws_region = local.region_hcl.locals.aws_region +} + +remote_state { + backend = "s3" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + bucket = local.state_bucket_name + region = local.aws_region + key = "${path_relative_to_include()}/tofu.tfstate" + encrypt = true + use_lockfile = true + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>Provision AWS Bootstrap Resources</h3>

+ +Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts. + +:::tip + +Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it. + +You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role. + +::: + +For each account you want to bootstrap, you'll need to run the following commands: + +First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the AWS account you want to bootstrap. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +Next, apply the changes to your account. + +```bash title="name-of-account/_global/bootstrap" +terragrunt run --all --non-interactive --provider-cache apply +``` + +:::note Progress Checklist + + + + +::: + +
+ + +The resources you need provisioned in Azure to start managing resources with Pipelines are: + +1. An Azure Resource Group for OpenTofu state resources + 1. An Azure Storage Account in that resource group for OpenTofu state storage + 1. An Azure Storage Container in that storage account for OpenTofu state storage +2. An Entra ID Application to use for plans + 1. A Flexible Federated Identity Credential for the application to authenticate with your project on any branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + 2. A role assignment for the service principal to access the Azure Storage Account +3. An Entra ID Application to use for applies + 1. A Federated Identity Credential for the application to authenticate with your project on the deploy branch + 2. A Service Principal for the application to be used in role assignments + 1. A role assignment for the service principal to access the Azure subscription + +:::tip Don't Panic! + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + +::: + +The process that we'll follow to get these resources ready for Pipelines is: + +1. Use Boilerplate to scaffold bootstrap configurations in your project for each Azure subscription +2. Use Terragrunt to provision these resources in your Azure subscription +3. Finalizing Terragrunt configurations using the bootstrap resources we just provisioned +4. Pull the bootstrap resources into state, now that we have configured a remote state backend +5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>Bootstrap Your Project for Azure</h3>

+ +For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each subscription: + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \ + --output-folder . +``` + +:::tip + +You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time. + +::: + +:::tip + +You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \ + --output-folder . \ + --var 'AccountName=dev' \ + --var 'GitLabGroupName=acme' \ + --var 'GitLabRepoName=infrastructure-live' \ + --var 'GitLabInstanceURL=https://gitlab.com' \ + --var 'SubscriptionName=dev' \ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \ + --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \ + --var 'AzureLocation=East US' \ + --var 'StateResourceGroupName=pipelines-rg' \ + --var 'StateStorageAccountName=mysa' \ + --var 'StateStorageContainerName=tfstate' \ + --non-interactive +``` + +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + +```yaml title="vars.yml" +AccountName: dev +GitLabGroupName: acme +GitLabRepoName: infrastructure-live +GitLabInstanceURL: https://gitlab.com +SubscriptionName: dev +AzureTenantID: 00000000-0000-0000-0000-000000000000 +AzureSubscriptionID: 11111111-1111-1111-1111-111111111111 +AzureLocation: East US +StateResourceGroupName: pipelines-rg +StateStorageAccountName: my-storage-account +StateStorageContainerName: tfstate +``` + +```bash +boilerplate \ + --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \ + --output-folder . \ + --var-file vars.yml \ + --non-interactive +``` + +::: + +:::note Progress Checklist + + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>Provision Azure Bootstrap Resources</h3>

+ +Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in your Azure subscription. + +If you haven't already, you'll want to authenticate to Azure using the `az` CLI. + +```bash +az login +``` + +:::note Progress Checklist + + + +::: + + +To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: + +- `ARM_TENANT_ID` +- `ARM_SUBSCRIPTION_ID` + +For example: + +```bash +export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" +export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" +``` + +:::note Progress Checklist + + + +::: + +First, make sure that everything is set up correctly by running a plan in the subscription directory. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache plan +``` + +:::tip + +We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + +::: + +:::note Progress Checklist + + + +::: + +Next, apply the changes to your subscription. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply +``` + +:::tip + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + +::: + +:::note Progress Checklist + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>Finalizing Terragrunt configurations</h3>

+ +Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. + +First, edit the `root.hcl` file in the root of your project to leverage the storage account we just provisioned. + +If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this: + +```hcl title="root.hcl" +locals { + sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl")) + + state_resource_group_name = local.sub_hcl.locals.state_resource_group_name + state_storage_account_name = local.sub_hcl.locals.state_storage_account_name + state_storage_container_name = local.sub_hcl.locals.state_storage_container_name +} + +remote_state { + backend = "azurerm" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + resource_group_name = local.state_resource_group_name + storage_account_name = local.state_storage_account_name + container_name = local.state_storage_container_name + key = "${path_relative_to_include()}/tofu.tfstate" + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = < + +::: + +Next, finalize the `.gruntwork/environment-.hcl` file in the root of your project to reference the IDs for the applications we just provisioned. + +You can find the values for the `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory in `name-of-subscription/bootstrap`. + +```bash +terragrunt stack output +``` + +The relevant bits that you want to extract from the stack output are the following: + +```hcl +bootstrap = { + apply_app = { + client_id = "33333333-3333-3333-3333-333333333333" + } + plan_app = { + client_id = "44444444-4444-4444-4444-444444444444" + } +} +``` + +You can use those values to set the values for `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file. + +:::note Progress Checklist + + + + +::: + +{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */} +

<h3>Pulling the resources into state</h3>

+ +Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned. + +```bash title="name-of-subscription" +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy +``` + +:::tip + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + +::: + +:::note Progress Checklist + + + +::: + +
+
+ +); +} diff --git a/src/components/pipelines/CloudSpecificBootstrap.tsx b/src/components/pipelines/CloudSpecificBootstrap.tsx new file mode 100644 index 0000000000..7693722140 --- /dev/null +++ b/src/components/pipelines/CloudSpecificBootstrap.tsx @@ -0,0 +1,498 @@ +import React from "react"; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import Admonition from '@theme/Admonition'; +import CodeBlock from '@theme/CodeBlock'; +import PersistentCheckbox from '../PersistentCheckbox'; +import AwsRootHcl from '!!raw-loader!./snippets/aws-root.hcl'; +import AzureRootHcl from '!!raw-loader!./snippets/azure-root.hcl'; +import AzureBootstrapOutputHcl from '!!raw-loader!./snippets/azure-bootstrap-output.hcl'; + +interface CloudSpecificBootstrapProps { + gitProvider: 'github' | 'gitlab'; +} + +// Helper functions to generate provider-specific content +const getGitProviderConfig = (provider: 'github' | 'gitlab') => { + if (provider === 'github') { + return { + awsTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0', + azureTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0', + orgVarName: 'GitHubOrgName', + repoVarName: 'GitHubRepoName', + orgLabel: 'GitHub Organization', + repoLabel: 'GitHub Repository', + instanceUrlVar: null, + instanceUrlLabel: null, + issuerVar: null, + issuerLabel: null, + }; + } else { + return { + awsTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0', + azureTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0', + orgVarName: 'GitLabGroupName', + repoVarName: 'GitLabRepoName', + orgLabel: 'GitLab Group', + repoLabel: 'GitLab Repository', + instanceUrlVar: 'GitLabInstanceURL', + instanceUrlLabel: 'GitLab Instance URL', + issuerVar: 'Issuer', + issuerLabel: 'Issuer URL', + }; + } +}; + +export const CloudSpecificBootstrap = ({ gitProvider }: CloudSpecificBootstrapProps) => { + const config = getGitProviderConfig(gitProvider); + + return ( + <> + + + +

The resources you need provisioned in AWS to start managing resources with Pipelines are:

+ +
    +
<ol>
  <li>An OpenID Connect (OIDC) provider</li>
  <li>An IAM role for Pipelines to assume when running Terragrunt plan commands</li>
  <li>An IAM role for Pipelines to assume when running Terragrunt apply commands</li>
</ol>
+ +

You will need these resources in every AWS account you want Pipelines to manage infrastructure in.

+ + + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the terragrunt-scale-catalog repository. + + + +
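If you'd like to look at those templates before running anything, one optional, low-effort way is to clone the catalog and list the Boilerplate templates it ships:

```bash
# Optional: browse the catalog locally before scaffolding anything.
git clone https://github.com/gruntwork-io/terragrunt-scale-catalog.git
ls terragrunt-scale-catalog/templates/boilerplate
```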

The process that we'll follow to get these resources ready for Pipelines is:

+ +
    +
<ol>
  <li>Use Boilerplate to scaffold bootstrap configurations in your project for each AWS account</li>
  <li>Use Terragrunt to provision these resources in your AWS accounts</li>
  <li>(Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines</li>
</ol>
+ +

<h3>Bootstrap Your Project for AWS</h3>

+ +

First, confirm that you have a `root.hcl` file in the root of your project that looks something like this:

+ +{AwsRootHcl} + +

If you don't have a `root.hcl` file, you might need to customize the bootstrapping process, as the Terragrunt scale catalog expects a `root.hcl` file in the root of the project. Please contact [Gruntwork support](/support) if you need help.

+ +

For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each account:

+ + +boilerplate \ + --template-url '{config.awsTemplateUrl}' \ + --output-folder . + + + + +You'll need to run this boilerplate command once for each AWS account you want to manage with Pipelines. Boilerplate will prompt you for account-specific values each time. + + + + + +You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + + +{`boilerplate \\ + --template-url '${config.awsTemplateUrl}' \\ + --output-folder . \\ + --var 'AccountName=dev' \\ + --var '${config.orgVarName}=acme' \\ + --var '${config.repoVarName}=infrastructure-live' \\ + ${config.instanceUrlVar ? `--var '${config.instanceUrlVar}=https://gitlab.com' \\` : ''} + --var 'AWSAccountID=123456789012' \\ + --var 'AWSRegion=us-east-1' \\ + --var 'StateBucketName=my-state-bucket' \\ + --non-interactive`} + + +{gitProvider === 'gitlab' && ( + <> +

If you're using a self-hosted GitLab instance, you'll want to make sure the issuer is set correctly when calling Boilerplate.

+ + + {`boilerplate \\ + --template-url '${config.awsTemplateUrl}' \\ + --output-folder . \\ + --var 'AccountName=dev' \\ + --var '${config.orgVarName}=acme' \\ + --var '${config.repoVarName}=infrastructure-live' \\ + --var '${config.instanceUrlVar}=https://gitlab.com' \\ + --var 'AWSAccountID=123456789012' \\ + --var 'AWSRegion=us-east-1' \\ + --var 'StateBucketName=my-state-bucket' \\ + --var '${config.issuerVar}=$$ISSUER_URL$$' \\ + --non-interactive`} + + +)} + +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + + +{`AccountName: dev +${config.orgVarName}: acme +${config.repoVarName}: infrastructure-live +${config.instanceUrlVar ? `${config.instanceUrlVar}: https://gitlab.com` : ''} +AWSAccountID: 123456789012 +AWSRegion: us-east-1 +StateBucketName: my-state-bucket`} + + + +{`boilerplate \\ + --template-url '${config.awsTemplateUrl}' \\ + --output-folder . \\ + --var-file vars.yml \\ + --non-interactive`} + + +
+ + + + + + + +

<h3>Provision AWS Bootstrap Resources</h3>

+ +

Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts.

+ + + +

Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it.

+ +

You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, such as assuming an IAM role.
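For example, here is a minimal sketch assuming you authenticate through AWS IAM Identity Center (SSO) with a named profile; the profile name is only an illustration:

```bash
# Log in and point the AWS CLI/provider at the account you're about to bootstrap.
# "dev-admin" is a placeholder profile name; use the one mapped to this account.
aws sso login --profile dev-admin
export AWS_PROFILE=dev-admin

# Double-check that you're in the account you intend to bootstrap.
aws sts get-caller-identity
```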

+ +
+ +

For each account you want to bootstrap, you'll need to run the following commands:

+ +

First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the AWS account you want to bootstrap.

+ + +terragrunt run --all --non-interactive --provider-cache plan + + + + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + + + +Next, apply the changes to your account. + + +terragrunt run --all --non-interactive --provider-cache apply + + + + + + + + + +
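After the apply completes, you can optionally sanity-check what was created from the CLI. This is just a rough spot check; the exact role names depend on the values you passed to Boilerplate:

```bash
# You should see an OIDC provider for your CI/CD platform in the account.
aws iam list-open-id-connect-providers

# Look for the plan and apply roles created by the bootstrap configuration.
aws iam list-roles --query "Roles[?contains(RoleName, 'pipelines')].RoleName"
```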
+ + + +

The resources you need provisioned in Azure to start managing resources with Pipelines are:

+ +
    +
<ol>
  <li>An Azure Resource Group for OpenTofu state resources
    <ol>
      <li>An Azure Storage Account in that resource group for OpenTofu state storage
        <ol>
          <li>An Azure Storage Container in that storage account for OpenTofu state storage</li>
        </ol>
      </li>
    </ol>
  </li>
  <li>An Entra ID Application to use for plans
    <ol>
      <li>A Flexible Federated Identity Credential for the application to authenticate with your project on any branch</li>
      <li>A Service Principal for the application to be used in role assignments
        <ol>
          <li>A role assignment for the service principal to access the Azure subscription</li>
          <li>A role assignment for the service principal to access the Azure Storage Account</li>
        </ol>
      </li>
    </ol>
  </li>
  <li>An Entra ID Application to use for applies
    <ol>
      <li>A Federated Identity Credential for the application to authenticate with your project on the deploy branch</li>
      <li>A Service Principal for the application to be used in role assignments
        <ol>
          <li>A role assignment for the service principal to access the Azure subscription</li>
        </ol>
      </li>
    </ol>
  </li>
</ol>
+ + + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + + + +The process that we'll follow to get these resources ready for Pipelines is: + +
    +
<ol>
  <li>Use Boilerplate to scaffold bootstrap configurations in your project for each Azure subscription</li>
  <li>Use Terragrunt to provision these resources in your Azure subscription</li>
  <li>Finalize the Terragrunt configurations using the bootstrap resources we just provisioned</li>
  <li>Pull the bootstrap resources into state, now that we have configured a remote state backend</li>
  <li>(Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines</li>
</ol>
+ +

<h3>Bootstrap Your Project for Azure</h3>

+ +

For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each subscription:

+ + +{`boilerplate \\ + --template-url '${config.azureTemplateUrl}' \\ + --output-folder .`} + + + + +You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time. + + + + + +

You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.

+ +

Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.

+ +

e.g.

+ + +{`boilerplate \\ + --template-url '${config.azureTemplateUrl}' \\ + --output-folder . \\ + --var 'AccountName=dev' \\ + --var '${config.orgVarName}=acme' \\ + --var '${config.repoVarName}=infrastructure-live' \\ + ${config.instanceUrlVar ? `--var '${config.instanceUrlVar}=https://gitlab.com' \\` : ''} + --var 'SubscriptionName=dev' \\ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \\ + --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \\ + --var 'AzureLocation=East US' \\ + --var 'StateResourceGroupName=pipelines-rg' \\ + --var 'StateStorageAccountName=mysa' \\ + --var 'StateStorageContainerName=tfstate' \\ + --non-interactive`} + + +

You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.

+ + +{`AccountName: dev +${config.orgVarName}: acme +${config.repoVarName}: infrastructure-live +${config.instanceUrlVar ? `${config.instanceUrlVar}: https://gitlab.com` : ''} +SubscriptionName: dev +AzureTenantID: 00000000-0000-0000-0000-000000000000 +AzureSubscriptionID: 11111111-1111-1111-1111-111111111111 +AzureLocation: East US +StateResourceGroupName: pipelines-rg +StateStorageAccountName: my-storage-account +StateStorageContainerName: tfstate`} + + + +{`boilerplate \\ + --template-url '${config.azureTemplateUrl}' \\ + --output-folder . \\ + --var-file vars.yml \\ + --non-interactive`} + + +
+ + + + + + + +

<h3>Provision Azure Bootstrap Resources</h3>

+ +

Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in your Azure subscription.

+ +

If you haven't already, you'll want to authenticate to Azure using the `az` CLI.

+ + +az login + + + + + + + + + +To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you are exporting the following environment variables if you haven't the values via the `az` CLI: + +- `ARM_TENANT_ID` +- `ARM_SUBSCRIPTION_ID` + +For example: + + +export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" +export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111" + + + + + + + + +First, make sure that everything is set up correctly by running a plan in the subscription directory. + + +terragrunt run --all --non-interactive --provider-cache plan + + + + +We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + + + + + + + + + +Next, apply the changes to your subscription. + + +terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply + + + + +We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state. + + + + + + + + +
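As an optional sanity check after the apply, you can confirm the bootstrap resources exist. The names below reuse the example Boilerplate values from earlier (`mysa`, `pipelines-rg`); substitute your own:

```bash
# Confirm the state storage account was created.
az storage account show --name mysa --resource-group pipelines-rg --query provisioningState

# Confirm the Entra ID applications for plan and apply were created.
# Display names depend on your Boilerplate inputs, so list and eyeball them.
az ad app list --query "[].displayName" --output table
```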

<h3>Finalizing Terragrunt configurations</h3>

+ +Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. + +First, edit the `root.hcl` file in the root of your project to leverage the storage account we just provisioned. + +If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this: + + +{AzureRootHcl} + + + + + + + + +

Next, finalize the `.gruntwork/environment-(name-of-subscription).hcl` file in the root of your project to reference the IDs for the applications we just provisioned.

+ +

You can find the values for the `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory in `name-of-subscription/bootstrap`.

+ + +terragrunt stack output + + +

The relevant bits that you want to extract from the stack output are the following:

+ + +{AzureBootstrapOutputHcl} + + +

You can use those values to set the values for `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-(name-of-subscription).hcl` file.

+ + + + + + + + +
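If you'd rather not copy the IDs by hand, a quick filter over the same output works too (this assumes the output shape shown above):

```bash
# Surface just the client_id values for plan_app and apply_app.
terragrunt stack output | grep client_id
```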

<h3>Pulling the resources into state</h3>

+ +

Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned.

+ + +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy + + + + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + + + + + + + + + +
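Once the migration finishes, you can verify that the state now lives in the storage container rather than on disk. Again, the account and container names here are the example values from earlier; adjust them to yours:

```bash
# List the state objects that were copied into the remote backend.
az storage blob list \
  --account-name mysa \
  --container-name tfstate \
  --auth-mode login \
  --query "[].name" \
  --output table
```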
+
+ + ); +}; + +export default CloudSpecificBootstrap; diff --git a/src/components/pipelines/CloudSpecificBootstrap.tsx-attempt-1 b/src/components/pipelines/CloudSpecificBootstrap.tsx-attempt-1 new file mode 100644 index 0000000000..907d53a3ce --- /dev/null +++ b/src/components/pipelines/CloudSpecificBootstrap.tsx-attempt-1 @@ -0,0 +1,498 @@ +import React from "react"; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import Admonition from '@theme/Admonition'; +import CodeBlock from '@theme/CodeBlock'; +import PersistentCheckbox from '../PersistentCheckbox'; +import AwsRootHcl from '!!raw-loader!./snippets/aws-root.hcl'; +import AzureRootHcl from '!!raw-loader!./snippets/azure-root.hcl'; +import AzureBootstrapOutputHcl from '!!raw-loader!./snippets/azure-bootstrap-output.hcl'; + +interface CloudSpecificBootstrapProps { + gitProvider: 'github' | 'gitlab'; +} + +// Helper functions to generate provider-specific content +const getGitProviderConfig = (provider: 'github' | 'gitlab') => { + if (provider === 'github') { + return { + awsTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0', + azureTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0', + orgVarName: 'GitHubOrgName', + repoVarName: 'GitHubRepoName', + orgLabel: 'GitHub Organization', + repoLabel: 'GitHub Repository', + instanceUrlVar: null, + instanceUrlLabel: null, + issuerVar: null, + issuerLabel: null, + }; + } else { + return { + awsTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0', + azureTemplateUrl: 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0', + orgVarName: 'GitLabGroupName', + repoVarName: 'GitLabRepoName', + orgLabel: 'GitLab Group', + repoLabel: 'GitLab Repository', + instanceUrlVar: 'GitLabInstanceURL', + instanceUrlLabel: 'GitLab Instance URL', + issuerVar: 'Issuer', + issuerLabel: 'Issuer URL', + }; + } +}; + +export const CloudSpecificBootstrap = ({ gitProvider }: CloudSpecificBootstrapProps) => { + const config = getGitProviderConfig(gitProvider); + + return ( + <> + + + +

The resources you need provisioned in AWS to start managing resources with Pipelines are:

+ +
    +
<ol>
  <li>An OpenID Connect (OIDC) provider</li>
  <li>An IAM role for Pipelines to assume when running Terragrunt plan commands</li>
  <li>An IAM role for Pipelines to assume when running Terragrunt apply commands</li>
</ol>
+ +

You will need these resources in every AWS account you want Pipelines to manage infrastructure in.

+ + + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the terragrunt-scale-catalog repository. + + + +

The process that we'll follow to get these resources ready for Pipelines is:

+ +
    +
<ol>
  <li>Use Boilerplate to scaffold bootstrap configurations in your project for each AWS account</li>
  <li>Use Terragrunt to provision these resources in your AWS accounts</li>
  <li>(Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines</li>
</ol>
+ +

<h3>Bootstrap Your Project for AWS</h3>

+ +

First, confirm that you have a `root.hcl` file in the root of your project that looks something like this:

+ +{AwsRootHcl} + +

If you don't have a `root.hcl` file, you might need to customize the bootstrapping process, as the Terragrunt scale catalog expects a `root.hcl` file in the root of the project. Please contact [Gruntwork support](/support) if you need help.

+ +

For each AWS account that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each account:

+ + +boilerplate \ + --template-url '{config.awsTemplateUrl}' \ + --output-folder . + + + + +You'll need to run this boilerplate command once for each AWS account you want to manage with Pipelines. Boilerplate will prompt you for account-specific values each time. + + + + + +You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something. + +Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case. + +e.g. + + +{`boilerplate \\ + --template-url '${config.awsTemplateUrl}' \\ + --output-folder . \\ + --var 'AccountName=dev' \\ + --var '${config.orgVarName}=acme' \\ + --var '${config.repoVarName}=infrastructure-live' \\ + ${config.instanceUrlVar ? `--var '${config.instanceUrlVar}=https://gitlab.com' \\` : ''} + --var 'AWSAccountID=123456789012' \\ + --var 'AWSRegion=us-east-1' \\ + --var 'StateBucketName=my-state-bucket' \\ + --non-interactive`} + + +{gitProvider === 'gitlab' && ( + <> +

If you're using a self-hosted GitLab instance, you'll want to make sure the issuer is set correctly when calling Boilerplate.

+ + + {`boilerplate \\ + --template-url '${config.awsTemplateUrl}' \\ + --output-folder . \\ + --var 'AccountName=dev' \\ + --var '${config.orgVarName}=acme' \\ + --var '${config.repoVarName}=infrastructure-live' \\ + --var '${config.instanceUrlVar}=https://gitlab.com' \\ + --var 'AWSAccountID=123456789012' \\ + --var 'AWSRegion=us-east-1' \\ + --var 'StateBucketName=my-state-bucket' \\ + --var '${config.issuerVar}=$$ISSUER_URL$$' \\ + --non-interactive`} + + +)} + +You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag. + + +{`AccountName: dev +${config.orgVarName}: acme +${config.repoVarName}: infrastructure-live +${config.instanceUrlVar ? `${config.instanceUrlVar}: https://gitlab.com` : ''} +AWSAccountID: 123456789012 +AWSRegion: us-east-1 +StateBucketName: my-state-bucket`} + + + +{`boilerplate \\ + --template-url '${config.awsTemplateUrl}' \\ + --output-folder . \\ + --var-file vars.yml \\ + --non-interactive`} + + +
+ + + + + + + +

<h3>Provision AWS Bootstrap Resources</h3>

+ +

Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts.

+ + + +

Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it.

+ +

You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, such as assuming an IAM role.

+ +
+ +

For each account you want to bootstrap, you'll need to run the following commands:

+ +

First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the AWS account you want to bootstrap.

+ + +terragrunt run --all --non-interactive --provider-cache plan + + + + +We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/). + + + +Next, apply the changes to your account. + + +terragrunt run --all --non-interactive --provider-cache apply + + + + + + + + + +
+ + + +

The resources you need provisioned in Azure to start managing resources with Pipelines are:

+ +
    +
<ol>
  <li>An Azure Resource Group for OpenTofu state resources
    <ol>
      <li>An Azure Storage Account in that resource group for OpenTofu state storage
        <ol>
          <li>An Azure Storage Container in that storage account for OpenTofu state storage</li>
        </ol>
      </li>
    </ol>
  </li>
  <li>An Entra ID Application to use for plans
    <ol>
      <li>A Flexible Federated Identity Credential for the application to authenticate with your project on any branch</li>
      <li>A Service Principal for the application to be used in role assignments
        <ol>
          <li>A role assignment for the service principal to access the Azure subscription</li>
          <li>A role assignment for the service principal to access the Azure Storage Account</li>
        </ol>
      </li>
    </ol>
  </li>
  <li>An Entra ID Application to use for applies
    <ol>
      <li>A Federated Identity Credential for the application to authenticate with your project on the deploy branch</li>
      <li>A Service Principal for the application to be used in role assignments
        <ol>
          <li>A role assignment for the service principal to access the Azure subscription</li>
        </ol>
      </li>
    </ol>
  </li>
</ol>
+ + + +This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project. + +If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository. + + + +The process that we'll follow to get these resources ready for Pipelines is: + +
1. Use Boilerplate to scaffold bootstrap configurations in your project for each Azure subscription
2. Use Terragrunt to provision these resources in your Azure subscription
3. Finalize the Terragrunt configurations using the bootstrap resources we just provisioned
4. Pull the bootstrap resources into state, now that we have configured a remote state backend
5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines
+ +

Bootstrap Your Project for Azure

+ +

For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each subscription:

+ + +{`boilerplate \\ + --template-url '${config.azureTemplateUrl}' \\ + --output-folder .`} + + + + +You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time. + + + + + +

You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.

+ +

Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.

+ +

e.g.

+ + +{`boilerplate \\ + --template-url '${config.azureTemplateUrl}' \\ + --output-folder . \\ + --var 'AccountName=dev' \\ + --var '${config.orgVarName}=acme' \\ + --var '${config.repoVarName}=infrastructure-live' \\ + ${config.instanceUrlVar ? `--var '${config.instanceUrlVar}=https://gitlab.com' \\` : ''} + --var 'SubscriptionName=dev' \\ + --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \\ + --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \\ + --var 'AzureLocation=East US' \\ + --var 'StateResourceGroupName=pipelines-rg' \\ + --var 'StateStorageAccountName=mysa' \\ + --var 'StateStorageContainerName=tfstate' \\ + --non-interactive`} + + +

You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.

{`AccountName: dev
${config.orgVarName}: acme
${config.repoVarName}: infrastructure-live
${config.instanceUrlVar ? `${config.instanceUrlVar}: https://gitlab.com` : ''}
SubscriptionName: dev
AzureTenantID: 00000000-0000-0000-0000-000000000000
AzureSubscriptionID: 11111111-1111-1111-1111-111111111111
AzureLocation: East US
StateResourceGroupName: pipelines-rg
StateStorageAccountName: mysa
StateStorageContainerName: tfstate`}

{`boilerplate \\
  --template-url '${config.azureTemplateUrl}' \\
  --output-folder . \\
  --var-file vars.yml \\
  --non-interactive`}
+ + + + + + + +

Provision Azure Bootstrap Resources

+ +

Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in each of those subscriptions.

+ +

If you haven't already, you'll want to authenticate to Azure using the `az` CLI.
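The basic `az login` command is shown below. If your user belongs to more than one tenant, you may prefer to target the tenant you're bootstrapping explicitly and then confirm the active context; a sketch, reusing the placeholder tenant ID from earlier:

```bash
# Optional: log in against a specific tenant (placeholder ID)
az login --tenant 00000000-0000-0000-0000-000000000000

# Confirm the active tenant and subscription
az account show --query '{tenant:tenantId, subscription:id, name:name}' --output table
```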

az login

To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you export the following environment variables if you haven't already set the values via the `az` CLI:

- `ARM_TENANT_ID`
- `ARM_SUBSCRIPTION_ID`

For example:

export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000"
export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111"

First, make sure that everything is set up correctly by running a plan in the subscription directory.

terragrunt run --all --non-interactive --provider-cache plan

We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run, which speeds things up by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).

Next, apply the changes to your subscription.

terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply

We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state.

Finalizing Terragrunt configurations

+ +Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned. + +First, edit the `root.hcl` file in the root of your project to leverage the storage account we just provisioned. + +If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this: + + +{AzureRootHcl} + + + + + + + + +

Next, finalize the `.gruntwork/environment-(name-of-subscription).hcl` file in the root of your project to reference the IDs for the applications we just provisioned.

+ +

You can find the values for `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `name-of-subscription/bootstrap` directory, where `name-of-subscription` is the name of the Azure subscription you're bootstrapping.

+ + +terragrunt stack output + + +

The relevant bits that you want to extract from the stack output are the following:

+ + +{AzureBootstrapOutputHcl} + + +

You can use those outputs to set `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-(name-of-subscription).hcl` file.
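If you'd rather pull just those two values out on the command line, a quick filter over the stack output may help. This assumes the relevant output names contain `client_id`, which may vary with the catalog version you're using:

```bash
# Run from name-of-subscription/bootstrap; narrow the stack output to the client IDs
terragrunt stack output | grep -i 'client_id'
```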

+ + + + + + + + +

Pulling the resources into state

+ +

Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned.

+ + +terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy + + + + +We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state. + + + + + + + + + +
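If you want to confirm that state actually landed in the remote backend after the migration, you can list the blobs in the state container. The account and container names below are the hypothetical values used earlier in this guide:

```bash
# List state objects in the bootstrap storage container (names are placeholders)
az storage blob list \
  --account-name mysa \
  --container-name tfstate \
  --auth-mode login \
  --query '[].name' \
  --output table
```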
+
+ + ); +}; + +export default CloudSpecificBootstrap; diff --git a/src/components/pipelines/snippets/aws-root.hcl b/src/components/pipelines/snippets/aws-root.hcl new file mode 100644 index 0000000000..94a19d7743 --- /dev/null +++ b/src/components/pipelines/snippets/aws-root.hcl @@ -0,0 +1,32 @@ +locals { + account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl")) + state_bucket_name = local.account_hcl.locals.state_bucket_name + + region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl")) + aws_region = local.region_hcl.locals.aws_region +} + +remote_state { + backend = "s3" + generate = { + path = "backend.tf" + if_exists = "overwrite" + } + config = { + bucket = local.state_bucket_name + region = local.aws_region + key = "${path_relative_to_include()}/tofu.tfstate" + encrypt = true + use_lockfile = true + } +} + +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = <