diff --git a/README.md b/README.md index 6a40c89856..62f7a230a3 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ use Flux already then you can easily add Weave GitOps to create a platform manag Mac / Linux ```console -curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.23.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp +curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.24.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp sudo mv /tmp/gitops /usr/local/bin gitops version ``` diff --git a/charts/gitops-server/Chart.yaml b/charts/gitops-server/Chart.yaml index fd8b85933e..c174ddf9f6 100644 --- a/charts/gitops-server/Chart.yaml +++ b/charts/gitops-server/Chart.yaml @@ -13,9 +13,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 4.0.21 +version: 4.0.22 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v0.23.0" +appVersion: "v0.24.0" diff --git a/charts/gitops-server/values.yaml b/charts/gitops-server/values.yaml index 3d93d24499..95e82c7ce6 100644 --- a/charts/gitops-server/values.yaml +++ b/charts/gitops-server/values.yaml @@ -10,7 +10,7 @@ image: repository: ghcr.io/weaveworks/wego-app pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
- tag: "v0.23.0" + tag: "v0.24.0" imagePullSecrets: [] nameOverride: "" fullnameOverride: "" diff --git a/package-lock.json b/package-lock.json index d9447ba0c6..5ff7135b42 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@weaveworks/weave-gitops", - "version": "0.24.0-rc.1", + "version": "0.24.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@weaveworks/weave-gitops", - "version": "0.24.0-rc.1", + "version": "0.24.0", "dependencies": { "@material-ui/core": "^4.12.3", "@material-ui/icons": "^4.11.2", diff --git a/package.json b/package.json index af4ed1d39c..220b589720 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@weaveworks/weave-gitops", - "version": "0.24.0-rc.1", + "version": "0.24.0", "description": "Weave GitOps core", "targets": { "default": { diff --git a/ui/components/__tests__/__snapshots__/Footer.test.tsx.snap b/ui/components/__tests__/__snapshots__/Footer.test.tsx.snap index 2d21d00215..1b990f3b5a 100644 --- a/ui/components/__tests__/__snapshots__/Footer.test.tsx.snap +++ b/ui/components/__tests__/__snapshots__/Footer.test.tsx.snap @@ -410,7 +410,7 @@ exports[`Footer snapshots no api version 1`] = ` /> @@ -421,7 +421,7 @@ exports[`Footer snapshots no api version 1`] = ` - v0.24.0-rc.1 + v0.24.0 diff --git a/website/docs/installation/weave-gitops.mdx b/website/docs/installation/weave-gitops.mdx index 9ba55b7d0f..f0b24164c4 100644 --- a/website/docs/installation/weave-gitops.mdx +++ b/website/docs/installation/weave-gitops.mdx @@ -107,7 +107,7 @@ import TabItem from "@theme/TabItem"; ```bash -curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.23.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp +curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.24.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp sudo mv /tmp/gitops /usr/local/bin gitops version ``` diff --git a/website/docs/references/cli-reference/gitops.md b/website/docs/references/cli-reference/gitops.md index 9b9740e346..266b700f38 100644 --- a/website/docs/references/cli-reference/gitops.md +++ b/website/docs/references/cli-reference/gitops.md @@ -50,4 +50,4 @@ Command line utility for managing Kubernetes applications via GitOps. 
* [gitops suspend](gitops_suspend.md) - Suspend a resource * [gitops version](gitops_version.md) - Display gitops version -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_beta.md b/website/docs/references/cli-reference/gitops_beta.md index a313ae24ff..9898dde169 100644 --- a/website/docs/references/cli-reference/gitops_beta.md +++ b/website/docs/references/cli-reference/gitops_beta.md @@ -24,4 +24,4 @@ This component contains unstable or still-in-development functionality * [gitops](gitops.md) - Weave GitOps * [gitops beta run](gitops_beta_run.md) - Set up an interactive sync between your cluster and your local file system -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_beta_run.md b/website/docs/references/cli-reference/gitops_beta_run.md index 55fd0a1fba..ed7d4c7ae8 100644 --- a/website/docs/references/cli-reference/gitops_beta_run.md +++ b/website/docs/references/cli-reference/gitops_beta_run.md @@ -60,7 +60,7 @@ gitops beta run ./charts/podinfo --timeout 3m --port-forward namespace=flux-syst --no-session Disable session management. If not specified, the session will be enabled by default. --port-forward string Forward the port from a cluster's resource to your local machine i.e. 'port=8080:8080,resource=svc/app'. --root-dir string Specify the root directory to watch for changes. If not specified, the root of Git repository will be used. - --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-0c92a3de-dirty") + --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-5c08e8e8-dirty") --session-namespace string Specify the namespace of the session. (default "default") --skip-dashboard-install Skip installation of the Dashboard. This also disables the prompt asking whether the Dashboard should be installed. --skip-resource-cleanup Skip resource cleanup. If not specified, the GitOps Run resources will be deleted by default. diff --git a/website/docs/references/cli-reference/gitops_check.md b/website/docs/references/cli-reference/gitops_check.md index 14b6269d26..e43812a66f 100644 --- a/website/docs/references/cli-reference/gitops_check.md +++ b/website/docs/references/cli-reference/gitops_check.md @@ -36,4 +36,4 @@ gitops check * [gitops](gitops.md) - Weave GitOps -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_completion.md b/website/docs/references/cli-reference/gitops_completion.md index 019ea097d1..d46ecac767 100644 --- a/website/docs/references/cli-reference/gitops_completion.md +++ b/website/docs/references/cli-reference/gitops_completion.md @@ -33,4 +33,4 @@ See each sub-command's help for details on how to use the generated script. 
* [gitops completion powershell](gitops_completion_powershell.md) - Generate the autocompletion script for powershell * [gitops completion zsh](gitops_completion_zsh.md) - Generate the autocompletion script for zsh -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_completion_bash.md b/website/docs/references/cli-reference/gitops_completion_bash.md index 3639679661..94c861eacf 100644 --- a/website/docs/references/cli-reference/gitops_completion_bash.md +++ b/website/docs/references/cli-reference/gitops_completion_bash.md @@ -52,4 +52,4 @@ gitops completion bash * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_completion_fish.md b/website/docs/references/cli-reference/gitops_completion_fish.md index d2047e7d18..342c8ea160 100644 --- a/website/docs/references/cli-reference/gitops_completion_fish.md +++ b/website/docs/references/cli-reference/gitops_completion_fish.md @@ -43,4 +43,4 @@ gitops completion fish [flags] * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_completion_powershell.md b/website/docs/references/cli-reference/gitops_completion_powershell.md index 57c36887ad..028132e477 100644 --- a/website/docs/references/cli-reference/gitops_completion_powershell.md +++ b/website/docs/references/cli-reference/gitops_completion_powershell.md @@ -40,4 +40,4 @@ gitops completion powershell [flags] * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_completion_zsh.md b/website/docs/references/cli-reference/gitops_completion_zsh.md index a931c191ef..df6caa3920 100644 --- a/website/docs/references/cli-reference/gitops_completion_zsh.md +++ b/website/docs/references/cli-reference/gitops_completion_zsh.md @@ -54,4 +54,4 @@ gitops completion zsh [flags] * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_create.md b/website/docs/references/cli-reference/gitops_create.md index 1c7e63748d..73dd5b8f35 100644 --- a/website/docs/references/cli-reference/gitops_create.md +++ b/website/docs/references/cli-reference/gitops_create.md @@ -46,4 +46,4 @@ gitops create terraform my-resource \ * [gitops create dashboard](gitops_create_dashboard.md) - Create a HelmRepository and HelmRelease to deploy Weave GitOps * [gitops create terraform](gitops_create_terraform.md) - Create a Terraform object -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_create_terraform.md b/website/docs/references/cli-reference/gitops_create_terraform.md index 74b68012a0..93ddcbdf35 100644 --- 
a/website/docs/references/cli-reference/gitops_create_terraform.md +++ b/website/docs/references/cli-reference/gitops_create_terraform.md @@ -50,4 +50,4 @@ gitops create terraform -n default my-resource --source GitRepository/my-project * [gitops create](gitops_create.md) - Creates a resource -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_delete.md b/website/docs/references/cli-reference/gitops_delete.md index 7bed81cde0..f3727cfd2e 100644 --- a/website/docs/references/cli-reference/gitops_delete.md +++ b/website/docs/references/cli-reference/gitops_delete.md @@ -24,4 +24,4 @@ Delete a resource * [gitops](gitops.md) - Weave GitOps * [gitops delete terraform](gitops_delete_terraform.md) - Delete a Terraform object -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_delete_terraform.md b/website/docs/references/cli-reference/gitops_delete_terraform.md index 655a74589a..f12ca592a9 100644 --- a/website/docs/references/cli-reference/gitops_delete_terraform.md +++ b/website/docs/references/cli-reference/gitops_delete_terraform.md @@ -38,4 +38,4 @@ gitops delete terraform -n default my-resource * [gitops delete](gitops_delete.md) - Delete a resource -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_get.md b/website/docs/references/cli-reference/gitops_get.md index 7e39d7f469..edaca4db58 100644 --- a/website/docs/references/cli-reference/gitops_get.md +++ b/website/docs/references/cli-reference/gitops_get.md @@ -37,4 +37,4 @@ echo -n $PASSWORD | gitops get bcrypt-hash * [gitops get bcrypt-hash](gitops_get_bcrypt-hash.md) - Generates a hashed secret * [gitops get config](gitops_get_config.md) - Prints out the CLI configuration for Weave GitOps -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md b/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md index 3b793407b7..5b7326ed9b 100644 --- a/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md +++ b/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md @@ -36,4 +36,4 @@ echo -n $PASSWORD | gitops get bcrypt-hash * [gitops get](gitops_get.md) - Display one or many Weave GitOps resources -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_logs.md b/website/docs/references/cli-reference/gitops_logs.md index b8fdfc3d7d..76220c4e2d 100644 --- a/website/docs/references/cli-reference/gitops_logs.md +++ b/website/docs/references/cli-reference/gitops_logs.md @@ -24,4 +24,4 @@ Get logs for a resource * [gitops](gitops.md) - Weave GitOps * [gitops logs terraform](gitops_logs_terraform.md) - Get the runner logs of a Terraform object -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_logs_terraform.md b/website/docs/references/cli-reference/gitops_logs_terraform.md index ab7c5ad141..9e903456e2 100644 --- a/website/docs/references/cli-reference/gitops_logs_terraform.md +++ b/website/docs/references/cli-reference/gitops_logs_terraform.md @@ -38,4 
+38,4 @@ gitops logs terraform --namespace flux-system my-resource * [gitops logs](gitops_logs.md) - Get logs for a resource -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_remove.md b/website/docs/references/cli-reference/gitops_remove.md index 6a9b6b952c..d121752fa6 100644 --- a/website/docs/references/cli-reference/gitops_remove.md +++ b/website/docs/references/cli-reference/gitops_remove.md @@ -24,4 +24,4 @@ Remove various components of Weave GitOps * [gitops](gitops.md) - Weave GitOps * [gitops remove run](gitops_remove_run.md) - Remove GitOps Run sessions -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_replan.md b/website/docs/references/cli-reference/gitops_replan.md index b90659a655..ec24bc69e5 100644 --- a/website/docs/references/cli-reference/gitops_replan.md +++ b/website/docs/references/cli-reference/gitops_replan.md @@ -33,4 +33,4 @@ gitops replan terraform --namespace flux-system my-resource * [gitops](gitops.md) - Weave GitOps * [gitops replan terraform](gitops_replan_terraform.md) - Trigger replan for a Terraform object -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_replan_terraform.md b/website/docs/references/cli-reference/gitops_replan_terraform.md index 8d6e876136..21e884fe72 100644 --- a/website/docs/references/cli-reference/gitops_replan_terraform.md +++ b/website/docs/references/cli-reference/gitops_replan_terraform.md @@ -38,4 +38,4 @@ gitops replan terraform --namespace flux-system my-resource * [gitops replan](gitops_replan.md) - Replan a resource -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_resume.md b/website/docs/references/cli-reference/gitops_resume.md index 5da1f4f491..1f07bf5b59 100644 --- a/website/docs/references/cli-reference/gitops_resume.md +++ b/website/docs/references/cli-reference/gitops_resume.md @@ -33,4 +33,4 @@ gitops resume terraform --namespace flux-system my-resource * [gitops](gitops.md) - Weave GitOps * [gitops resume terraform](gitops_resume_terraform.md) - Resume a Terraform object -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_resume_terraform.md b/website/docs/references/cli-reference/gitops_resume_terraform.md index f6bc13dc15..77becfade6 100644 --- a/website/docs/references/cli-reference/gitops_resume_terraform.md +++ b/website/docs/references/cli-reference/gitops_resume_terraform.md @@ -38,4 +38,4 @@ gitops resume terraform --namespace flux-system my-resource * [gitops resume](gitops_resume.md) - Resume a resource -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_run.md b/website/docs/references/cli-reference/gitops_run.md index 508d85b0ed..a609989ec7 100644 --- a/website/docs/references/cli-reference/gitops_run.md +++ b/website/docs/references/cli-reference/gitops_run.md @@ -60,7 +60,7 @@ gitops beta run ./charts/podinfo --timeout 3m --port-forward namespace=flux-syst --no-session Disable session management. 
If not specified, the session will be enabled by default. --port-forward string Forward the port from a cluster's resource to your local machine i.e. 'port=8080:8080,resource=svc/app'. --root-dir string Specify the root directory to watch for changes. If not specified, the root of Git repository will be used. - --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-0c92a3de-dirty") + --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-5c08e8e8-dirty") --session-namespace string Specify the namespace of the session. (default "default") --skip-dashboard-install Skip installation of the Dashboard. This also disables the prompt asking whether the Dashboard should be installed. --skip-resource-cleanup Skip resource cleanup. If not specified, the GitOps Run resources will be deleted by default. diff --git a/website/docs/references/cli-reference/gitops_set.md b/website/docs/references/cli-reference/gitops_set.md index add01ccbe0..9c27c5666b 100644 --- a/website/docs/references/cli-reference/gitops_set.md +++ b/website/docs/references/cli-reference/gitops_set.md @@ -32,4 +32,4 @@ gitops set config analytics true * [gitops](gitops.md) - Weave GitOps * [gitops set config](gitops_set_config.md) - Set the CLI configuration for Weave GitOps -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_suspend.md b/website/docs/references/cli-reference/gitops_suspend.md index d141a75527..9e25166dfb 100644 --- a/website/docs/references/cli-reference/gitops_suspend.md +++ b/website/docs/references/cli-reference/gitops_suspend.md @@ -33,4 +33,4 @@ gitops resume terraform --namespace flux-system my-resource * [gitops](gitops.md) - Weave GitOps * [gitops suspend terraform](gitops_suspend_terraform.md) - Suspend a Terraform object -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_suspend_terraform.md b/website/docs/references/cli-reference/gitops_suspend_terraform.md index cf072c08aa..846e4ae144 100644 --- a/website/docs/references/cli-reference/gitops_suspend_terraform.md +++ b/website/docs/references/cli-reference/gitops_suspend_terraform.md @@ -38,4 +38,4 @@ gitops suspend terraform --namespace flux-system my-resource * [gitops suspend](gitops_suspend.md) - Suspend a resource -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/cli-reference/gitops_version.md b/website/docs/references/cli-reference/gitops_version.md index 5c329a848d..bbd2e9f269 100644 --- a/website/docs/references/cli-reference/gitops_version.md +++ b/website/docs/references/cli-reference/gitops_version.md @@ -27,4 +27,4 @@ gitops version [flags] * [gitops](gitops.md) - Weave GitOps -###### Auto generated by spf13/cobra on 11-May-2023 +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/docs/references/helm-reference.md b/website/docs/references/helm-reference.md index ba7395c6b3..a091b3e92b 100644 --- a/website/docs/references/helm-reference.md +++ b/website/docs/references/helm-reference.md @@ -5,7 +5,7 @@ This is a reference of all the configurable values in weave gitops's helm chart. 
This is intended for customizing your installation after you've gone through the [getting started](../getting-started/intro.mdx) guide. -This reference was generated for the chart version 4.0.21 which installs weave gitops v0.23.0. +This reference was generated for the chart version 4.0.22 which installs weave gitops v0.24.0. ## Values @@ -27,7 +27,7 @@ This reference was generated for the chart version 4.0.21 which installs weave g | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"ghcr.io/weaveworks/wego-app"` | | -| image.tag | string | `"v0.23.0"` | | +| image.tag | string | `"v0.24.0"` | | | imagePullSecrets | list | `[]` | | | ingress.annotations | object | `{}` | | | ingress.className | string | `""` | | diff --git a/website/versioned_docs/version-0.24.0/_components/CurlCodeBlock.jsx b/website/versioned_docs/version-0.24.0/_components/CurlCodeBlock.jsx new file mode 100644 index 0000000000..b27993ae63 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/_components/CurlCodeBlock.jsx @@ -0,0 +1,24 @@ +import React from "react"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +export default function CurlCodeBlock({ localPath, hostedPath, content }) { + return ( + <> + + {() => ( + + curl -o {localPath} {window.location.protocol} + //{window.location.host} + {hostedPath} + + )} + + + + {content} + + + ); +} diff --git a/website/versioned_docs/version-0.24.0/_components/TierLabel.jsx b/website/versioned_docs/version-0.24.0/_components/TierLabel.jsx new file mode 100644 index 0000000000..1bb36bdbaf --- /dev/null +++ b/website/versioned_docs/version-0.24.0/_components/TierLabel.jsx @@ -0,0 +1,20 @@ +import React from "react"; +import Link from "@docusaurus/Link"; +import useGlobalData from "@docusaurus/useGlobalData"; + +const containerStyle = { + fontSize: 16, + marginLeft: 4, + fontVariant: "all-small-caps", +}; + +export default function TierLabel({ tiers }) { + return ( + + {tiers} + + ); +} diff --git a/website/versioned_docs/version-0.24.0/_components/_alpha_warning.mdx b/website/versioned_docs/version-0.24.0/_components/_alpha_warning.mdx new file mode 100644 index 0000000000..48e5112fb7 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/_components/_alpha_warning.mdx @@ -0,0 +1,9 @@ + +:::caution + +**This feature is in alpha and certain aspects will change** + +We're very excited for people to use this feature. +However, please note that changes in the API, behaviour and security will evolve. +The feature is suitable to use in controlled testing environments. 
+::: \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/assets/example-enterprise-helm.yaml b/website/versioned_docs/version-0.24.0/assets/example-enterprise-helm.yaml new file mode 100644 index 0000000000..d2ce6eb1c4 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/assets/example-enterprise-helm.yaml @@ -0,0 +1,48 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: weave-gitops-enterprise-charts + namespace: flux-system +spec: + interval: 60m + secretRef: + name: weave-gitops-enterprise-credentials + url: https://charts.dev.wkp.weave.works/releases/charts-v3 +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops-enterprise + namespace: flux-system +spec: + chart: + spec: + interval: 65m + chart: mccp + sourceRef: + kind: HelmRepository + name: weave-gitops-enterprise-charts + namespace: flux-system + version: 0.22.0 + install: + crds: CreateReplace + upgrade: + crds: CreateReplace + interval: 50m + values: + # -- Configure TLS settings if needed + # tls: + # -- Can be disabled if TLS is handled by a user-provided ingress controller + # enabled: true + # -- optionally specify a TLS secret + # secretName: null + config: + capi: + repositoryURL: https://github.com/$GITHUB_USER/fleet-infra + # -- Can be changed depending on your git repo structure + # repositoryPath: ./clusters/management/clusters + # repositoryClustersPath: ./cluster + git: + type: github + # -- Change if using on-prem github/gitlab + # hostname: https://github.com diff --git a/website/versioned_docs/version-0.24.0/assets/templates/.keep b/website/versioned_docs/version-0.24.0/assets/templates/.keep new file mode 100644 index 0000000000..dc92bc0885 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/assets/templates/.keep @@ -0,0 +1 @@ +"# keep" \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/assets/templates/capd-template.yaml b/website/versioned_docs/version-0.24.0/assets/templates/capd-template.yaml new file mode 100644 index 0000000000..96e687afbe --- /dev/null +++ b/website/versioned_docs/version-0.24.0/assets/templates/capd-template.yaml @@ -0,0 +1,162 @@ +apiVersion: templates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: cluster-template-development + namespace: default + annotations: + templates.weave.works/add-common-bases: "true" + templates.weave.works/inject-prune-annotation: "true" + labels: + weave.works/template-type: cluster +spec: + description: A simple CAPD template + params: + - name: CLUSTER_NAME + required: true + description: This is used for the cluster naming. 
+ - name: NAMESPACE + description: Namespace to create the cluster in + - name: KUBERNETES_VERSION + description: Kubernetes version to use for the cluster + options: ["1.19.11", "1.21.1", "1.22.0", "1.23.3"] + - name: CONTROL_PLANE_MACHINE_COUNT + description: Number of control planes + options: ["1", "2", "3"] + - name: WORKER_MACHINE_COUNT + description: Number of worker machines + resourcetemplates: + - content: + - apiVersion: gitops.weave.works/v1alpha1 + kind: GitopsCluster + metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + labels: + weave.works/capi: bootstrap + spec: + capiClusterRef: + name: "${CLUSTER_NAME}" + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + labels: + cni: calico + spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 10.128.0.0/12 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + replicas: "${CONTROL_PLANE_MACHINE_COUNT}" + version: "${KUBERNETES_VERSION}" + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + spec: + template: + spec: {} + - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + spec: + clusterName: "${CLUSTER_NAME}" + replicas: "${WORKER_MACHINE_COUNT}" + selector: + matchLabels: null + template: + spec: + bootstrap: + 
configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + version: "${KUBERNETES_VERSION}" diff --git a/website/versioned_docs/version-0.24.0/cluster-management/add-applications.mdx b/website/versioned_docs/version-0.24.0/cluster-management/add-applications.mdx new file mode 100644 index 0000000000..773cc0892e --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/add-applications.mdx @@ -0,0 +1,34 @@ +--- +title: Add Applications +hide_title: true +--- + +import TierLabel from "../\_components/TierLabel"; + +# Add Applications + +It is always useful to be able to install software packages to a bootstrapped cluster. Weave GitOps Enterprise enables this by adding applications to a target cluster through the UI, either as a Kustomization or as a HelmRelease. Here is how to do that: + +### Add an Application to a target cluster (bootstrapped) + +1. On the Applications page you will now find an Add Application button. + +![Profiles Selection](./img/add-application-btn.png) + +2. A form will appear where you can select the target cluster that you want to add the application to. + +![Profiles Selection](./img/add-application-form.png) + +3. You can then select either a Git repository or a Helm repository from the selected cluster as the source type. + +![Profiles Selection](./img/add-application-select-source.png) + +4. If you select a Git repository as the source type, you will be able to add the application as a Kustomization. + +![Profiles Selection](./img/add-application-kustomization.png) + +5. If you select a Helm repository as the source type, you will be able to add the application as a HelmRelease. If you selected the Profiles Helm chart repository URL, you will also be able to select a profile from the Profiles list created in [Profiles](profiles.mdx). + +![Profiles Selection](./img/add-application-helm-release.png) + +6. Finally, you will be able to create a PR to the target cluster, and you will see the new PR in your GitOps repository.
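For context on the add-applications walkthrough above: the pull request created by that form commits a Flux manifest for the chosen source type. Below is a minimal sketch of such a Kustomization, assuming a hypothetical `podinfo` application stored under `./apps/podinfo` in the GitOps repository; the name, namespace, and path are illustrative and not taken from this changeset.

```yaml
# Illustrative sketch only: app name, namespace, and path are assumptions.
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 10m
  sourceRef:
    kind: GitRepository
    name: flux-system    # the Git source selected in the form
  path: ./apps/podinfo   # directory in the repo containing the app manifests
  prune: true
```

If a Helm repository is selected instead, the PR carries a HelmRelease that references that HelmRepository source in the same way.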
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/calico-crs-configmap.yaml b/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/calico-crs-configmap.yaml new file mode 100644 index 0000000000..4293707dca --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/calico-crs-configmap.yaml @@ -0,0 +1,2441 @@ +apiVersion: v1 +data: + calico.yaml: "---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap + is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: + v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha + is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n + \ calico_backend: \"vxlan\"\n # On Azure, the underlying network has an MTU of + 1400, even though the network interface will have an MTU of 1500.\n # We set + this value to 1350 for “physical network MTU size minus 50” since we use VXLAN, + which uses a 50-byte header.\n # If enabling Wireguard, this value should be + changed to 1340 (Wireguard uses a 60-byte header).\n # https://docs.projectcalico.org/networking/mtu#determine-mtu-size\n + \ veth_mtu: \"1350\"\n \n # The CNI network configuration to install on each + node. The special\n # values in this config will be automatically populated.\n + \ cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": + \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n + \ \"log_level\": \"info\",\n \"log_file_path\": \"/var/log/calico/cni/cni.log\",\n + \ \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n + \ \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": + \"calico-ipam\"\n },\n \"policy\": {\n \"type\": + \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": + \"__KUBECONFIG_FILEPATH__\"\n }\n },\n {\n \"type\": + \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": + true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": + {\"bandwidth\": true}\n }\n ]\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\n\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: bgpconfigurations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: BGPConfiguration\n listKind: BGPConfigurationList\n plural: + bgpconfigurations\n singular: bgpconfiguration\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n description: + BGPConfiguration contains the configuration for any BGP routing.\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: BGPConfigurationSpec contains the + values of the BGP configuration.\n properties:\n asNumber:\n + \ description: 'ASNumber is the default AS number used by a node. + [Default:\n 64512]'\n format: int32\n type: + integer\n communities:\n description: Communities + is a list of BGP community values and their\n arbitrary names + for tagging routes.\n items:\n description: + Community contains standard or large community value\n and + its name.\n properties:\n name:\n description: + Name given to community value.\n type: string\n value:\n + \ description: Value must be of format `aa:nn` or `aa:nn:mm`.\n + \ For standard community use `aa:nn` format, where `aa` + and\n `nn` are 16 bit number. For large community use + `aa:nn:mm`\n format, where `aa`, `nn` and `mm` are 32 + bit number. Where,\n `aa` is an AS Number, `nn` and `mm` + are per-AS identifier.\n pattern: ^(\\d+):(\\d+)$|^(\\d+):(\\d+):(\\d+)$\n + \ type: string\n type: object\n type: + array\n listenPort:\n description: ListenPort + is the port where BGP protocol should listen.\n Defaults to + 179\n maximum: 65535\n minimum: 1\n type: + integer\n logSeverityScreen:\n description: 'LogSeverityScreen + is the log severity above which logs\n are sent to the stdout. + [Default: INFO]'\n type: string\n nodeToNodeMeshEnabled:\n + \ description: 'NodeToNodeMeshEnabled sets whether full node to + node\n BGP mesh is enabled. [Default: true]'\n type: + boolean\n prefixAdvertisements:\n description: + PrefixAdvertisements contains per-prefix advertisement\n configuration.\n + \ items:\n description: PrefixAdvertisement + configures advertisement properties\n for the specified CIDR.\n + \ properties:\n cidr:\n description: + CIDR for which properties should be advertised.\n type: + string\n communities:\n description: + Communities can be list of either community names\n already + defined in `Specs.Communities` or community value\n of + format `aa:nn` or `aa:nn:mm`. For standard community use\n `aa:nn` + format, where `aa` and `nn` are 16 bit number. For\n large + community use `aa:nn:mm` format, where `aa`, `nn` and\n `mm` + are 32 bit number. Where,`aa` is an AS Number, `nn` and\n `mm` + are per-AS identifier.\n items:\n type: + string\n type: array\n type: object\n + \ type: array\n serviceClusterIPs:\n description: + ServiceClusterIPs are the CIDR blocks from which service\n cluster + IPs are allocated. If specified, Calico will advertise these\n blocks, + as well as any cluster IPs within them.\n items:\n description: + ServiceClusterIPBlock represents a single allowed ClusterIP\n CIDR + block.\n properties:\n cidr:\n type: + string\n type: object\n type: array\n serviceExternalIPs:\n + \ description: ServiceExternalIPs are the CIDR blocks for Kubernetes\n + \ Service External IPs. Kubernetes Service ExternalIPs will + only be\n advertised if they are within one of these blocks.\n + \ items:\n description: ServiceExternalIPBlock + represents a single allowed\n External IP CIDR block.\n properties:\n + \ cidr:\n type: string\n type: + object\n type: array\n serviceLoadBalancerIPs:\n + \ description: ServiceLoadBalancerIPs are the CIDR blocks for + Kubernetes\n Service LoadBalancer IPs. 
Kubernetes Service status.LoadBalancer.Ingress\n + \ IPs will only be advertised if they are within one of these + blocks.\n items:\n description: ServiceLoadBalancerIPBlock + represents a single allowed\n LoadBalancer IP CIDR block.\n + \ properties:\n cidr:\n type: + string\n type: object\n type: array\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: bgppeers.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: BGPPeer\n listKind: BGPPeerList\n plural: bgppeers\n + \ singular: bgppeer\n scope: Cluster\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: BGPPeerSpec contains the specification + for a BGPPeer resource.\n properties:\n asNumber:\n + \ description: The AS Number of the peer.\n format: + int32\n type: integer\n keepOriginalNextHop:\n + \ description: Option to keep the original nexthop field when + routes\n are sent to a BGP Peer. Setting \"true\" configures + the selected BGP\n Peers node to use the \"next hop keep;\" + instead of \"next hop self;\"(default)\n in the specific branch + of the Node on \"bird.cfg\".\n type: boolean\n maxRestartTime:\n + \ description: Time to allow for software restart. When specified, + this\n is configured as the graceful restart timeout. When + not specified,\n the BIRD default of 120s is used.\n type: + string\n node:\n description: The node name identifying + the Calico node instance that\n is targeted by this peer. If + this is not set, and no nodeSelector\n is specified, then this + BGP peer selects all nodes in the cluster.\n type: string\n nodeSelector:\n + \ description: Selector for the nodes that should have this peering. + \ When\n this is set, the Node field must be empty.\n type: + string\n password:\n description: Optional BGP + password for the peerings generated by this\n BGPPeer resource.\n + \ properties:\n secretKeyRef:\n description: + Selects a key of a secret in the node pod's namespace.\n properties:\n + \ key:\n description: The key of + the secret to select from. Must be\n a valid secret + key.\n type: string\n name:\n + \ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n + \ TODO: Add other useful fields. 
apiVersion, kind, uid?'\n + \ type: string\n optional:\n description: + Specify whether the Secret or its key must be\n defined\n + \ type: boolean\n required:\n - + key\n type: object\n type: object\n peerIP:\n + \ description: The IP address of the peer followed by an optional + port\n number to peer with. If port number is given, format + should be `[]:port`\n or `:` for IPv4. If + optional port number is not set,\n and this peer IP and ASNumber + belongs to a calico/node with ListenPort\n set in BGPConfiguration, + then we use that port to peer.\n type: string\n peerSelector:\n + \ description: Selector for the remote nodes to peer with. When + this\n is set, the PeerIP and ASNumber fields must be empty. + \ For each\n peering between the local node and selected remote + nodes, we configure\n an IPv4 peering if both ends have NodeBGPSpec.IPv4Address + specified,\n and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address + specified. The\n remote AS number comes from the remote node’s + NodeBGPSpec.ASNumber,\n or the global default if that is not + set.\n type: string\n sourceAddress:\n description: + Specifies whether and how to configure a source address\n for + the peerings generated by this BGPPeer resource. Default value\n \"UseNodeIP\" + means to configure the node IP as the source address. \"None\"\n means + not to configure a source address.\n type: string\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: blockaffinities.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: BlockAffinity\n listKind: BlockAffinityList\n plural: + blockaffinities\n singular: blockaffinity\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: BlockAffinitySpec contains the specification + for a BlockAffinity\n resource.\n properties:\n cidr:\n + \ type: string\n deleted:\n description: + Deleted indicates that this block affinity is being deleted.\n This + field is a string for compatibility with older releases that\n mistakenly + treat this field as a string.\n type: string\n node:\n + \ type: string\n state:\n type: + string\n required:\n - cidr\n - deleted\n + \ - node\n - state\n type: object\n + \ type: object\n served: true\n storage: true\nstatus:\n acceptedNames:\n + \ kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: clusterinformations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: ClusterInformation\n listKind: ClusterInformationList\n + \ plural: clusterinformations\n singular: clusterinformation\n scope: Cluster\n + \ versions:\n - name: v1\n schema:\n openAPIV3Schema:\n description: + ClusterInformation contains the cluster specific information.\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: ClusterInformationSpec contains + the values of describing\n the cluster.\n properties:\n + \ calicoVersion:\n description: CalicoVersion is + the version of Calico that the cluster\n is running\n type: + string\n clusterGUID:\n description: ClusterGUID + is the GUID of the cluster\n type: string\n clusterType:\n + \ description: ClusterType describes the type of the cluster\n + \ type: string\n datastoreReady:\n description: + DatastoreReady is used during significant datastore migrations\n to + signal to components such as Felix that it should wait before\n accessing + the datastore.\n type: boolean\n variant:\n description: + Variant declares which variant of Calico should be active.\n type: + string\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: + CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuilder.io/version: + (devel)\n creationTimestamp: null\n name: felixconfigurations.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: FelixConfiguration\n listKind: + FelixConfigurationList\n plural: felixconfigurations\n singular: felixconfiguration\n + \ scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ description: Felix Configuration contains the configuration for Felix.\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: FelixConfigurationSpec contains + the values of the Felix configuration.\n properties:\n allowIPIPPacketsFromWorkloads:\n + \ description: 'AllowIPIPPacketsFromWorkloads controls whether + Felix\n will add a rule to drop IPIP encapsulated traffic from + workloads\n [Default: false]'\n type: boolean\n + \ allowVXLANPacketsFromWorkloads:\n description: + 'AllowVXLANPacketsFromWorkloads controls whether Felix\n will + add a rule to drop VXLAN encapsulated traffic from workloads\n [Default: + false]'\n type: boolean\n awsSrcDstCheck:\n description: + 'Set source-destination-check on AWS EC2 instances. Accepted\n value + must be one of \"DoNothing\", \"Enabled\" or \"Disabled\". [Default:\n DoNothing]'\n + \ enum:\n - DoNothing\n - + Enable\n - Disable\n type: string\n bpfConnectTimeLoadBalancingEnabled:\n + \ description: 'BPFConnectTimeLoadBalancingEnabled when in BPF + mode,\n controls whether Felix installs the connection-time load + balancer. The\n connect-time load balancer is required for the + host to be able to\n reach Kubernetes services and it improves + the performance of pod-to-service\n connections. 
The only reason + to disable it is for debugging purposes. [Default:\n true]'\n + \ type: boolean\n bpfDataIfacePattern:\n description: + 'BPFDataIfacePattern is a regular expression that controls\n which + interfaces Felix should attach BPF programs to in order to\n catch + traffic to/from the network. This needs to match the interfaces\n that + Calico workload traffic flows over as well as any interfaces\n that + handle incoming traffic to nodeports and services from outside\n the + cluster. It should not match the workload interfaces (usually\n named + cali...). [Default: ^(en.*|eth.*|tunl0$)]'\n type: string\n bpfDisableUnprivileged:\n + \ description: 'BPFDisableUnprivileged, if enabled, Felix sets + the kernel.unprivileged_bpf_disabled\n sysctl to disable unprivileged + use of BPF. This ensures that unprivileged\n users cannot access + Calico''s BPF maps and cannot insert their own\n BPF programs + to interfere with Calico''s. [Default: true]'\n type: boolean\n + \ bpfEnabled:\n description: 'BPFEnabled, if enabled + Felix will use the BPF dataplane.\n [Default: false]'\n type: + boolean\n bpfExtToServiceConnmark:\n description: + 'BPFExtToServiceConnmark in BPF mode, control a 32bit\n mark + that is set on connections from an external client to a local\n service. + This mark allows us to control how packets of that connection\n are + routed within the host and how is routing intepreted by RPF\n check. + [Default: 0]'\n type: integer\n bpfExternalServiceMode:\n + \ description: 'BPFExternalServiceMode in BPF mode, controls how + connections\n from outside the cluster to services (node ports + and cluster IPs)\n are forwarded to remote workloads. If set + to \"Tunnel\" then both\n request and response traffic is tunneled + to the remote node. If\n set to \"DSR\", the request traffic + is tunneled but the response traffic\n is sent directly from + the remote node. In \"DSR\" mode, the remote\n node appears + to use the IP of the ingress node; this requires a\n permissive + L2 network. [Default: Tunnel]'\n type: string\n bpfKubeProxyEndpointSlicesEnabled:\n + \ description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, + controls\n whether Felix's embedded kube-proxy accepts EndpointSlices + or not.\n type: boolean\n bpfKubeProxyIptablesCleanupEnabled:\n + \ description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled + in BPF\n mode, Felix will proactively clean up the upstream Kubernetes + kube-proxy''s\n iptables chains. Should only be enabled if kube-proxy + is not running. [Default:\n true]'\n type: + boolean\n bpfKubeProxyMinSyncPeriod:\n description: + 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the\n minimum + time between updates to the dataplane for Felix''s embedded\n kube-proxy. + \ Lower values give reduced set-up latency. Higher values\n reduce + Felix CPU usage by batching up more work. [Default: 1s]'\n type: + string\n bpfLogLevel:\n description: 'BPFLogLevel + controls the log level of the BPF programs\n when in BPF dataplane + mode. One of \"Off\", \"Info\", or \"Debug\". The\n logs are + emitted to the BPF trace pipe, accessible with the command\n `tc + exec bpf debug`. [Default: Off].'\n type: string\n chainInsertMode:\n + \ description: 'ChainInsertMode controls whether Felix hooks the + kernel’s\n top-level iptables chains by inserting a rule at the + top of the\n chain or by appending a rule at the bottom. insert + is the safe default\n since it prevents Calico’s rules from being + bypassed. 
If you switch\n to append mode, be sure that the other + rules in the chains signal\n acceptance by falling through to + the Calico rules, otherwise the\n Calico policy will be bypassed. + [Default: insert]'\n type: string\n dataplaneDriver:\n + \ type: string\n debugDisableLogDropping:\n type: + boolean\n debugMemoryProfilePath:\n type: string\n + \ debugSimulateCalcGraphHangAfter:\n type: string\n + \ debugSimulateDataplaneHangAfter:\n type: string\n + \ defaultEndpointToHostAction:\n description: 'DefaultEndpointToHostAction + controls what happens to\n traffic that goes from a workload + endpoint to the host itself (after\n the traffic hits the endpoint + egress policy). By default Calico\n blocks traffic from workload + endpoints to the host itself with an\n iptables “DROP” action. + If you want to allow some or all traffic\n from endpoint to host, + set this parameter to RETURN or ACCEPT. Use\n RETURN if you have + your own rules in the iptables “INPUT” chain;\n Calico will insert + its rules at the top of that chain, then “RETURN”\n packets to + the “INPUT” chain once it has completed processing workload\n endpoint + egress policy. Use ACCEPT to unconditionally accept packets\n from + workloads after processing workload endpoint egress policy.\n [Default: + Drop]'\n type: string\n deviceRouteProtocol:\n + \ description: This defines the route protocol added to programmed + device\n routes, by default this will be RTPROT_BOOT when left + blank.\n type: integer\n deviceRouteSourceAddress:\n + \ description: This is the source address to use on programmed + device\n routes. By default the source address is left blank, + leaving the\n kernel to choose the source address used.\n type: + string\n disableConntrackInvalidCheck:\n type: + boolean\n endpointReportingDelay:\n type: string\n + \ endpointReportingEnabled:\n type: boolean\n externalNodesList:\n + \ description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes\n + \ which may source tunnel traffic and have the tunneled traffic + be\n accepted at calico nodes.\n items:\n + \ type: string\n type: array\n failsafeInboundHostPorts:\n + \ description: 'FailsafeInboundHostPorts is a list of UDP/TCP + ports\n and CIDRs that Felix will allow incoming traffic to + host endpoints\n on irrespective of the security policy. This + is useful to avoid\n accidentally cutting off a host with incorrect + configuration. For\n back-compatibility, if the protocol is + not specified, it defaults\n to \"tcp\". If a CIDR is not specified, + it will allow traffic from\n all addresses. To disable all + inbound host ports, use the value\n none. The default value + allows ssh access and DHCP. [Default: tcp:22,\n udp:68, tcp:179, + tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'\n items:\n + \ description: ProtoPort is combination of protocol, port, and + CIDR.\n Protocol and port must be specified.\n properties:\n + \ net:\n type: string\n port:\n + \ type: integer\n protocol:\n type: + string\n required:\n - port\n - + protocol\n type: object\n type: array\n failsafeOutboundHostPorts:\n + \ description: 'FailsafeOutboundHostPorts is a list of UDP/TCP + ports\n and CIDRs that Felix will allow outgoing traffic from + host endpoints\n to irrespective of the security policy. This + is useful to avoid\n accidentally cutting off a host with incorrect + configuration. For\n back-compatibility, if the protocol is + not specified, it defaults\n to \"tcp\". If a CIDR is not specified, + it will allow traffic from\n all addresses. 
To disable all + outbound host ports, use the value\n none. The default value + opens etcd''s standard ports to ensure that\n Felix does not + get cut off from etcd as well as allowing DHCP and\n DNS. [Default: + tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666,\n tcp:6667, + udp:53, udp:67]'\n items:\n description: ProtoPort + is combination of protocol, port, and CIDR.\n Protocol and + port must be specified.\n properties:\n net:\n + \ type: string\n port:\n type: + integer\n protocol:\n type: string\n + \ required:\n - port\n - + protocol\n type: object\n type: array\n featureDetectOverride:\n + \ description: FeatureDetectOverride is used to override the feature\n + \ detection. Values are specified in a comma separated list + with no\n spaces, example; \"SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=\".\n + \ \"true\" or \"false\" will force the feature, empty or omitted + values\n are auto-detected.\n type: string\n + \ genericXDPEnabled:\n description: 'GenericXDPEnabled + enables Generic XDP so network cards\n that don''t support XDP + offload or driver modes can use XDP. This\n is not recommended + since it doesn''t provide better performance\n than iptables. + [Default: false]'\n type: boolean\n healthEnabled:\n + \ type: boolean\n healthHost:\n type: + string\n healthPort:\n type: integer\n interfaceExclude:\n + \ description: 'InterfaceExclude is a comma-separated list of + interfaces\n that Felix should exclude when monitoring for host + endpoints. The\n default value ensures that Felix ignores Kubernetes'' + IPVS dummy\n interface, which is used internally by kube-proxy. + If you want to\n exclude multiple interface names using a single + value, the list\n supports regular expressions. For regular expressions + you must wrap\n the value with ''/''. For example having values + ''/^kube/,veth1''\n will exclude all interfaces that begin with + ''kube'' and also the\n interface ''veth1''. [Default: kube-ipvs0]'\n + \ type: string\n interfacePrefix:\n description: + 'InterfacePrefix is the interface name prefix that identifies\n workload + endpoints and so distinguishes them from host endpoint\n interfaces. + Note: in environments other than bare metal, the orchestrators\n configure + this appropriately. For example our Kubernetes and Docker\n integrations + set the ‘cali’ value, and our OpenStack integration\n sets the + ‘tap’ value. [Default: cali]'\n type: string\n interfaceRefreshInterval:\n + \ description: InterfaceRefreshInterval is the period at which + Felix\n rescans local interfaces to verify their state. The + rescan can be\n disabled by setting the interval to 0.\n type: + string\n ipipEnabled:\n type: boolean\n ipipMTU:\n + \ description: 'IPIPMTU is the MTU to set on the tunnel device. + See\n Configuring MTU [Default: 1440]'\n type: + integer\n ipsetsRefreshInterval:\n description: + 'IpsetsRefreshInterval is the period at which Felix re-checks\n all + iptables state to ensure that no other process has accidentally\n broken + Calico’s rules. Set to 0 to disable iptables refresh. [Default:\n 90s]'\n + \ type: string\n iptablesBackend:\n description: + IptablesBackend specifies which backend of iptables will\n be + used. The default is legacy.\n type: string\n iptablesFilterAllowAction:\n + \ type: string\n iptablesLockFilePath:\n description: + 'IptablesLockFilePath is the location of the iptables\n lock + file. 
You may need to change this if the lock file is not in\n its + standard location (for example if you have mapped it into Felix’s\n container + at a different path). [Default: /run/xtables.lock]'\n type: string\n + \ iptablesLockProbeInterval:\n description: 'IptablesLockProbeInterval + is the time that Felix will\n wait between attempts to acquire + the iptables lock if it is not\n available. Lower values make + Felix more responsive when the lock\n is contended, but use more + CPU. [Default: 50ms]'\n type: string\n iptablesLockTimeout:\n + \ description: 'IptablesLockTimeout is the time that Felix will + wait\n for the iptables lock, or 0, to disable. To use this feature, + Felix\n must share the iptables lock file with all other processes + that\n also take the lock. When running Felix inside a container, + this\n requires the /run directory of the host to be mounted + into the calico/node\n or calico/felix container. [Default: 0s + disabled]'\n type: string\n iptablesMangleAllowAction:\n + \ type: string\n iptablesMarkMask:\n description: + 'IptablesMarkMask is the mask that Felix selects its\n IPTables + Mark bits from. Should be a 32 bit hexadecimal number with\n at + least 8 bits set, none of which clash with any other mark bits\n in + use on the system. [Default: 0xff000000]'\n format: int32\n type: + integer\n iptablesNATOutgoingInterfaceFilter:\n type: + string\n iptablesPostWriteCheckInterval:\n description: + 'IptablesPostWriteCheckInterval is the period after Felix\n has + done a write to the dataplane that it schedules an extra read\n back + in order to check the write was not clobbered by another process.\n This + should only occur if another application on the system doesn’t\n respect + the iptables lock. [Default: 1s]'\n type: string\n iptablesRefreshInterval:\n + \ description: 'IptablesRefreshInterval is the period at which + Felix\n re-checks the IP sets in the dataplane to ensure that + no other process\n has accidentally broken Calico''s rules. + Set to 0 to disable IP\n sets refresh. Note: the default for + this value is lower than the\n other refresh intervals as a + workaround for a Linux kernel bug that\n was fixed in kernel + version 4.11. If you are using v4.11 or greater\n you may want + to set this to, a higher value to reduce Felix CPU\n usage. + [Default: 10s]'\n type: string\n ipv6Support:\n + \ type: boolean\n kubeNodePortRanges:\n description: + 'KubeNodePortRanges holds list of port ranges used for\n service + node ports. Only used if felix detects kube-proxy running\n in + ipvs mode. Felix uses these ranges to separate host and workload\n traffic. + [Default: 30000:32767].'\n items:\n anyOf:\n + \ - type: integer\n - type: string\n + \ pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n logFilePath:\n description: + 'LogFilePath is the full path to the Felix log. Set to\n none + to disable file logging. [Default: /var/log/calico/felix.log]'\n type: + string\n logPrefix:\n description: 'LogPrefix + is the log prefix that Felix uses when rendering\n LOG rules. + [Default: calico-packet]'\n type: string\n logSeverityFile:\n + \ description: 'LogSeverityFile is the log severity above which + logs\n are sent to the log file. [Default: Info]'\n type: + string\n logSeverityScreen:\n description: 'LogSeverityScreen + is the log severity above which logs\n are sent to the stdout. + [Default: Info]'\n type: string\n logSeveritySys:\n + \ description: 'LogSeveritySys is the log severity above which + logs\n are sent to the syslog. 
Set to None for no logging to + syslog. [Default:\n Info]'\n type: string\n + \ maxIpsetSize:\n type: integer\n metadataAddr:\n + \ description: 'MetadataAddr is the IP address or domain name + of the\n server that can answer VM queries for cloud-init metadata. + In OpenStack,\n this corresponds to the machine running nova-api + (or in Ubuntu,\n nova-api-metadata). A value of none (case insensitive) + means that\n Felix should not set up any NAT rule for the metadata + path. [Default:\n 127.0.0.1]'\n type: string\n + \ metadataPort:\n description: 'MetadataPort is + the port of the metadata server. This,\n combined with global.MetadataAddr + (if not ‘None’), is used to set\n up a NAT rule, from 169.254.169.254:80 + to MetadataAddr:MetadataPort.\n In most cases this should not + need to be changed [Default: 8775].'\n type: integer\n mtuIfacePattern:\n + \ description: MTUIfacePattern is a regular expression that controls\n + \ which interfaces Felix should scan in order to calculate the + host's\n MTU. This should not match workload interfaces (usually + named cali...).\n type: string\n natOutgoingAddress:\n + \ description: NATOutgoingAddress specifies an address to use + when performing\n source NAT for traffic in a natOutgoing pool + that is leaving the\n network. By default the address used + is an address on the interface\n the traffic is leaving on + (ie it uses the iptables MASQUERADE target)\n type: string\n + \ natPortRange:\n anyOf:\n - + type: integer\n - type: string\n description: + NATPortRange specifies the range of ports that is used\n for + port mapping when doing outgoing NAT. When unset the default\n behavior + of the network stack is used.\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n netlinkTimeout:\n type: string\n openstackRegion:\n + \ description: 'OpenstackRegion is the name of the region that + a particular\n Felix belongs to. In a multi-region Calico/OpenStack + deployment,\n this must be configured somehow for each Felix + (here in the datamodel,\n or in felix.cfg or the environment + on each compute node), and must\n match the [calico] openstack_region + value configured in neutron.conf\n on each node. [Default: Empty]'\n + \ type: string\n policySyncPathPrefix:\n description: + 'PolicySyncPathPrefix is used to by Felix to communicate\n policy + changes to external services, like Application layer policy.\n [Default: + Empty]'\n type: string\n prometheusGoMetricsEnabled:\n + \ description: 'PrometheusGoMetricsEnabled disables Go runtime + metrics\n collection, which the Prometheus client does by default, + when set\n to false. This reduces the number of metrics reported, + reducing\n Prometheus load. [Default: true]'\n type: + boolean\n prometheusMetricsEnabled:\n description: + 'PrometheusMetricsEnabled enables the Prometheus metrics\n server + in Felix if set to true. [Default: false]'\n type: boolean\n + \ prometheusMetricsHost:\n description: 'PrometheusMetricsHost + is the host that the Prometheus\n metrics server should bind + to. [Default: empty]'\n type: string\n prometheusMetricsPort:\n + \ description: 'PrometheusMetricsPort is the TCP port that the + Prometheus\n metrics server should bind to. [Default: 9091]'\n + \ type: integer\n prometheusProcessMetricsEnabled:\n + \ description: 'PrometheusProcessMetricsEnabled disables process + metrics\n collection, which the Prometheus client does by default, + when set\n to false. This reduces the number of metrics reported, + reducing\n Prometheus load. 
[Default: true]'\n type: + boolean\n removeExternalRoutes:\n description: + Whether or not to remove device routes that have not\n been + programmed by Felix. Disabling this will allow external applications\n to + also add device routes. This is enabled by default which means\n we + will remove externally added routes.\n type: boolean\n reportingInterval:\n + \ description: 'ReportingInterval is the interval at which Felix + reports\n its status into the datastore or 0 to disable. Must + be non-zero\n in OpenStack deployments. [Default: 30s]'\n type: + string\n reportingTTL:\n description: 'ReportingTTL + is the time-to-live setting for process-wide\n status reports. + [Default: 90s]'\n type: string\n routeRefreshInterval:\n + \ description: 'RouterefreshInterval is the period at which Felix + re-checks\n the routes in the dataplane to ensure that no other + process has\n accidentally broken Calico’s rules. Set to 0 to + disable route refresh.\n [Default: 90s]'\n type: + string\n routeSource:\n description: 'RouteSource + configures where Felix gets its routing\n information. - WorkloadIPs: + use workload endpoints to construct\n routes. - CalicoIPAM: the + default - use IPAM data to construct routes.'\n type: string\n + \ routeTableRange:\n description: Calico programs + additional Linux route tables for various\n purposes. RouteTableRange + specifies the indices of the route tables\n that Calico should + use.\n properties:\n max:\n type: + integer\n min:\n type: integer\n required:\n + \ - max\n - min\n type: + object\n serviceLoopPrevention:\n description: + 'When service IP advertisement is enabled, prevent routing\n loops + to service IPs that are not in use, by dropping or rejecting\n packets + that do not get DNAT''d by kube-proxy. Unless set to \"Disabled\",\n in + which case such routing loops continue to be allowed. [Default:\n Drop]'\n + \ type: string\n sidecarAccelerationEnabled:\n + \ description: 'SidecarAccelerationEnabled enables experimental + sidecar\n acceleration [Default: false]'\n type: + boolean\n usageReportingEnabled:\n description: + 'UsageReportingEnabled reports anonymous Calico version\n number + and cluster size to projectcalico.org. Logs warnings returned\n by + the usage server. For example, if a significant security vulnerability\n has + been discovered in the version of Calico being used. [Default:\n true]'\n + \ type: boolean\n usageReportingInitialDelay:\n + \ description: 'UsageReportingInitialDelay controls the minimum + delay\n before Felix makes a report. [Default: 300s]'\n type: + string\n usageReportingInterval:\n description: + 'UsageReportingInterval controls the interval at which\n Felix + makes reports. [Default: 86400s]'\n type: string\n useInternalDataplaneDriver:\n + \ type: boolean\n vxlanEnabled:\n type: + boolean\n vxlanMTU:\n description: 'VXLANMTU is + the MTU to set on the tunnel device. See\n Configuring MTU [Default: + 1440]'\n type: integer\n vxlanPort:\n type: + integer\n vxlanVNI:\n type: integer\n wireguardEnabled:\n + \ description: 'WireguardEnabled controls whether Wireguard is + enabled.\n [Default: false]'\n type: boolean\n + \ wireguardInterfaceName:\n description: 'WireguardInterfaceName + specifies the name to use for\n the Wireguard interface. [Default: + wg.calico]'\n type: string\n wireguardListeningPort:\n + \ description: 'WireguardListeningPort controls the listening + port used\n by Wireguard. 
[Default: 51820]'\n type: + integer\n wireguardMTU:\n description: 'WireguardMTU + controls the MTU on the Wireguard interface.\n See Configuring + MTU [Default: 1420]'\n type: integer\n wireguardRoutingRulePriority:\n + \ description: 'WireguardRoutingRulePriority controls the priority + value\n to use for the Wireguard routing rule. [Default: 99]'\n + \ type: integer\n xdpEnabled:\n description: + 'XDPEnabled enables XDP acceleration for suitable untracked\n incoming + deny rules. [Default: true]'\n type: boolean\n xdpRefreshInterval:\n + \ description: 'XDPRefreshInterval is the period at which Felix + re-checks\n all XDP state to ensure that no other process has + accidentally broken\n Calico''s BPF maps or attached programs. + Set to 0 to disable XDP\n refresh. [Default: 90s]'\n type: + string\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: + CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuilder.io/version: + (devel)\n creationTimestamp: null\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: GlobalNetworkPolicy\n listKind: + GlobalNetworkPolicyList\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n + \ scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n properties:\n applyOnForward:\n + \ description: ApplyOnForward indicates to apply the rules in + this policy\n on forward traffic.\n type: + boolean\n doNotTrack:\n description: DoNotTrack + indicates whether packets matched by the rules\n in this policy + should go through the data plane's connection tracking,\n such + as Linux conntrack. If True, the rules in this policy are\n applied + before any data plane connection tracking, and packets allowed\n by + this policy are marked as not to be tracked.\n type: boolean\n + \ egress:\n description: The ordered set of egress + rules. Each rule contains\n a set of packet match criteria + and a corresponding action to apply.\n items:\n description: + \"A Rule encapsulates a set of match criteria and an\n action. + \ Both selector-based security Policy and security Profiles\n reference + rules - separated out as a list of rules for both ingress\n and + egress packet matching. \\n Each positive match criteria has\n a + negated version, prefixed with ”Not”. All the match criteria\n within + a rule must be satisfied for a packet to match. 
A single\n rule + can contain the positive and negative version of a match\n and + both must be satisfied for the rule to match.\"\n properties:\n + \ action:\n type: string\n destination:\n + \ description: Destination contains the match criteria that + apply\n to destination entity.\n properties:\n + \ namespaceSelector:\n description: + \"NamespaceSelector is an optional field that\n contains + a selector expression. Only traffic that originates\n from + (or terminates at) endpoints within the selected\n namespaces + will be matched. When both NamespaceSelector\n and + Selector are defined on the same rule, then only workload\n endpoints + that are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. 
This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). + \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. 
+ Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. 
\\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n ingress:\n description: The ordered set + of ingress rules. Each rule contains\n a set of packet match + criteria and a corresponding action to apply.\n items:\n description: + \"A Rule encapsulates a set of match criteria and an\n action. + \ Both selector-based security Policy and security Profiles\n reference + rules - separated out as a list of rules for both ingress\n and + egress packet matching. \\n Each positive match criteria has\n a + negated version, prefixed with ”Not”. All the match criteria\n within + a rule must be satisfied for a packet to match. A single\n rule + can contain the positive and negative version of a match\n and + both must be satisfied for the rule to match.\"\n properties:\n + \ action:\n type: string\n destination:\n + \ description: Destination contains the match criteria that + apply\n to destination entity.\n properties:\n + \ namespaceSelector:\n description: + \"NamespaceSelector is an optional field that\n contains + a selector expression. Only traffic that originates\n from + (or terminates at) endpoints within the selected\n namespaces + will be matched. When both NamespaceSelector\n and + Selector are defined on the same rule, then only workload\n endpoints + that are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. 
\\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. 
Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). + \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. 
\\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n namespaceSelector:\n description: NamespaceSelector + is an optional field for an expression\n used to select a pod + based on namespaces.\n type: string\n order:\n + \ description: Order is an optional field that specifies the order + in\n which the policy is applied. Policies with higher \"order\" + are applied\n after those with lower order. If the order is + omitted, it may be\n considered to be \"infinite\" - i.e. the + policy will be applied last. Policies\n with identical order + will be applied in alphanumerical order based\n on the Policy + \"Name\".\n type: number\n preDNAT:\n description: + PreDNAT indicates to apply the rules in this policy before\n any + DNAT.\n type: boolean\n selector:\n description: + \"The selector is an expression used to pick pick out\n the endpoints + that the policy should be applied to. \\n Selector\n expressions + follow this syntax: \\n \\tlabel == \\\"string_literal\\\"\n \\ + -> comparison, e.g. my_label == \\\"foo bar\\\" \\tlabel != \\\"string_literal\\\"\n + \ \\ -> not equal; also matches if label is not present \\tlabel + in\n { \\\"a\\\", \\\"b\\\", \\\"c\\\", ... } -> true if the + value of label X is\n one of \\\"a\\\", \\\"b\\\", \\\"c\\\" + \\tlabel not in { \\\"a\\\", \\\"b\\\", \\\"c\\\",\n ... } -> + \ true if the value of label X is not one of \\\"a\\\", \\\"b\\\",\n \\\"c\\\" + \\thas(label_name) -> True if that label is present \\t! 
expr\n -> + negation of expr \\texpr && expr -> Short-circuit and \\texpr\n || + expr -> Short-circuit or \\t( expr ) -> parens for grouping \\tall()\n or + the empty selector -> matches all endpoints. \\n Label names are\n allowed + to contain alphanumerics, -, _ and /. String literals are\n more + permissive but they do not support escape characters. \\n Examples\n (with + made-up labels): \\n \\ttype == \\\"webserver\\\" && deployment\n == + \\\"prod\\\" \\ttype in {\\\"frontend\\\", \\\"backend\\\"} \\tdeployment !=\n + \ \\\"dev\\\" \\t! has(label_name)\"\n type: + string\n serviceAccountSelector:\n description: + ServiceAccountSelector is an optional field for an expression\n used + to select a pod based on service accounts.\n type: string\n types:\n + \ description: \"Types indicates whether this policy applies to + ingress,\n or to egress, or to both. When not explicitly specified + (and so\n the value on creation is empty or nil), Calico defaults + Types according\n to what Ingress and Egress rules are present + in the policy. The\n default is: \\n - [ PolicyTypeIngress ], + if there are no Egress rules\n (including the case where there + are also no Ingress rules) \\n\n - [ PolicyTypeEgress ], if + there are Egress rules but no Ingress\n rules \\n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are\n both Ingress and Egress rules. + \\n When the policy is read back again,\n Types will always be + one of these values, never empty or nil.\"\n items:\n description: + PolicyType enumerates the possible values of the PolicySpec\n Types + field.\n type: string\n type: array\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: globalnetworksets.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: GlobalNetworkSet\n listKind: GlobalNetworkSetList\n plural: + globalnetworksets\n singular: globalnetworkset\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n description: + GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs\n that + share labels to allow rules to refer to them via selectors. The labels\n of + GlobalNetworkSet are not namespaced.\n properties:\n apiVersion:\n + \ description: 'APIVersion defines the versioned schema of this representation\n + \ of an object. Servers should convert recognized schemas to the latest\n + \ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: GlobalNetworkSetSpec contains the + specification for a NetworkSet\n resource.\n properties:\n + \ nets:\n description: The list of IP networks + that belong to this set.\n items:\n type: + string\n type: array\n type: object\n type: + object\n served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: hostendpoints.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: HostEndpoint\n listKind: HostEndpointList\n plural: + hostendpoints\n singular: hostendpoint\n scope: Cluster\n versions:\n - + name: v1\n schema:\n openAPIV3Schema:\n properties:\n apiVersion:\n + \ description: 'APIVersion defines the versioned schema of this representation\n + \ of an object. Servers should convert recognized schemas to the latest\n + \ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: HostEndpointSpec contains the specification + for a HostEndpoint\n resource.\n properties:\n expectedIPs:\n + \ description: \"The expected IP addresses (IPv4 and IPv6) of + the endpoint.\n If \\\"InterfaceName\\\" is not present, Calico + will look for an interface\n matching any of the IPs in the list + and apply policy to that. Note:\n \\tWhen using the selector + match criteria in an ingress or egress\n security Policy \\tor + Profile, Calico converts the selector into\n a set of IP addresses. + For host \\tendpoints, the ExpectedIPs field\n is used for that + purpose. (If only the interface \\tname is specified,\n Calico + does not learn the IPs of the interface for use in match\n \\tcriteria.)\"\n + \ items:\n type: string\n type: + array\n interfaceName:\n description: \"Either + \\\"*\\\", or the name of a specific Linux interface\n to apply + policy to; or empty. \\\"*\\\" indicates that this HostEndpoint\n governs + all traffic to, from or through the default network namespace\n of + the host named by the \\\"Node\\\" field; entering and leaving that\n namespace + via any interface, including those from/to non-host-networked\n local + workloads. \\n If InterfaceName is not \\\"*\\\", this HostEndpoint\n only + governs traffic that enters or leaves the host through the\n specific + interface named by InterfaceName, or - when InterfaceName\n is + empty - through the specific interface that has one of the IPs\n in + ExpectedIPs. Therefore, when InterfaceName is empty, at least\n one + expected IP must be specified. 
Only external interfaces (such\n as + “eth0”) are supported here; it isn't possible for a HostEndpoint\n to + protect traffic through a specific local workload interface.\n \\n + Note: Only some kinds of policy are implemented for \\\"*\\\" HostEndpoints;\n + \ initially just pre-DNAT policy. Please check Calico documentation\n + \ for the latest position.\"\n type: string\n + \ node:\n description: The node name identifying + the Calico node instance.\n type: string\n ports:\n + \ description: Ports contains the endpoint's named ports, which + may\n be referenced in security policy rules.\n items:\n + \ properties:\n name:\n type: + string\n port:\n type: integer\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n required:\n - name\n - + port\n - protocol\n type: object\n type: + array\n profiles:\n description: A list of identifiers + of security Profile objects that\n apply to this endpoint. + Each profile is applied in the order that\n they appear in + this list. Profile rules are applied after the selector-based\n security + policy.\n items:\n type: string\n type: + array\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: + CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuilder.io/version: + (devel)\n creationTimestamp: null\n name: ipamblocks.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: IPAMBlock\n listKind: IPAMBlockList\n + \ plural: ipamblocks\n singular: ipamblock\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPAMBlockSpec contains the specification + for an IPAMBlock\n resource.\n properties:\n affinity:\n + \ type: string\n allocations:\n items:\n + \ type: integer\n # TODO: This nullable is + manually added in. 
We should update controller-gen\n # to handle + []*int properly itself.\n nullable: true\n type: + array\n attributes:\n items:\n properties:\n + \ handle_id:\n type: string\n secondary:\n + \ additionalProperties:\n type: + string\n type: object\n type: object\n + \ type: array\n cidr:\n type: + string\n deleted:\n type: boolean\n strictAffinity:\n + \ type: boolean\n unallocated:\n items:\n + \ type: integer\n type: array\n required:\n + \ - allocations\n - attributes\n - + cidr\n - strictAffinity\n - unallocated\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: ipamconfigs.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: IPAMConfig\n listKind: IPAMConfigList\n plural: ipamconfigs\n + \ singular: ipamconfig\n scope: Cluster\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPAMConfigSpec contains the specification + for an IPAMConfig\n resource.\n properties:\n autoAllocateBlocks:\n + \ type: boolean\n maxBlocksPerHost:\n description: + MaxBlocksPerHost, if non-zero, is the max number of blocks\n that + can be affine to each host.\n type: integer\n strictAffinity:\n + \ type: boolean\n required:\n - autoAllocateBlocks\n + \ - strictAffinity\n type: object\n type: + object\n served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: ipamhandles.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: IPAMHandle\n listKind: IPAMHandleList\n plural: ipamhandles\n + \ singular: ipamhandle\n scope: Cluster\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPAMHandleSpec contains the specification + for an IPAMHandle\n resource.\n properties:\n block:\n + \ additionalProperties:\n type: integer\n type: + object\n deleted:\n type: boolean\n handleID:\n + \ type: string\n required:\n - block\n + \ - handleID\n type: object\n type: object\n + \ served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: ippools.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: IPPool\n listKind: IPPoolList\n plural: ippools\n singular: + ippool\n scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPPoolSpec contains the specification + for an IPPool resource.\n properties:\n blockSize:\n + \ description: The block size to use for IP address assignments + from\n this pool. Defaults to 26 for IPv4 and 112 for IPv6.\n + \ type: integer\n cidr:\n description: + The pool CIDR.\n type: string\n disabled:\n description: + When disabled is true, Calico IPAM will not assign addresses\n from + this pool.\n type: boolean\n ipip:\n description: + 'Deprecated: this field is only used for APIv1 backwards\n compatibility. + Setting this field is not allowed, this field is\n for internal + use only.'\n properties:\n enabled:\n description: + When enabled is true, ipip tunneling will be used\n to + deliver packets to destinations within this pool.\n type: + boolean\n mode:\n description: The IPIP + mode. This can be one of \"always\" or \"cross-subnet\". A\n mode + of \"always\" will also use IPIP tunneling for routing to\n destination + IP addresses within this pool. A mode of \"cross-subnet\"\n will + only use IPIP tunneling when the destination node is on\n a + different subnet to the originating node. The default value\n (if + not specified) is \"always\".\n type: string\n type: + object\n ipipMode:\n description: Contains configuration + for IPIP tunneling for this pool.\n If not specified, then + this is defaulted to \"Never\" (i.e. IPIP tunneling\n is disabled).\n + \ type: string\n nat-outgoing:\n description: + 'Deprecated: this field is only used for APIv1 backwards\n compatibility. 
+ Setting this field is not allowed, this field is\n for internal + use only.'\n type: boolean\n natOutgoing:\n description: + When nat-outgoing is true, packets sent from Calico networked\n containers + in this pool to destinations outside of this pool will\n be + masqueraded.\n type: boolean\n nodeSelector:\n + \ description: Allows IPPool to allocate for a specific node by + label\n selector.\n type: string\n vxlanMode:\n + \ description: Contains configuration for VXLAN tunneling for + this pool.\n If not specified, then this is defaulted to \"Never\" + (i.e. VXLAN\n tunneling is disabled).\n type: + string\n required:\n - cidr\n type: object\n + \ type: object\n served: true\n storage: true\nstatus:\n acceptedNames:\n + \ kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: kubecontrollersconfigurations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: KubeControllersConfiguration\n listKind: KubeControllersConfigurationList\n + \ plural: kubecontrollersconfigurations\n singular: kubecontrollersconfiguration\n + \ scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: KubeControllersConfigurationSpec + contains the values of the\n Kubernetes controllers configuration.\n + \ properties:\n controllers:\n description: + Controllers enables and configures individual Kubernetes\n controllers\n + \ properties:\n namespace:\n description: + Namespace enables and configures the namespace controller.\n Enabled + by default, set to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to perform + reconciliation\n with the Calico datastore. [Default: + 5m]'\n type: string\n type: object\n + \ node:\n description: Node enables and + configures the node controller.\n Enabled by default, set + to nil to disable.\n properties:\n hostEndpoint:\n + \ description: HostEndpoint controls syncing nodes to + host endpoints.\n Disabled by default, set to nil to + disable.\n properties:\n autoCreate:\n + \ description: 'AutoCreate enables automatic creation + of\n host endpoints for every node. [Default: Disabled]'\n + \ type: string\n type: object\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform reconciliation\n with + the Calico datastore. [Default: 5m]'\n type: string\n + \ syncLabels:\n description: 'SyncLabels + controls whether to copy Kubernetes\n node labels to + Calico nodes. 
[Default: Enabled]'\n type: string\n type: + object\n policy:\n description: Policy + enables and configures the policy controller.\n Enabled + by default, set to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to perform + reconciliation\n with the Calico datastore. [Default: + 5m]'\n type: string\n type: object\n + \ serviceAccount:\n description: ServiceAccount + enables and configures the service\n account controller. + Enabled by default, set to nil to disable.\n properties:\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform reconciliation\n with + the Calico datastore. [Default: 5m]'\n type: string\n + \ type: object\n workloadEndpoint:\n description: + WorkloadEndpoint enables and configures the workload\n endpoint + controller. Enabled by default, set to nil to disable.\n properties:\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform reconciliation\n with + the Calico datastore. [Default: 5m]'\n type: string\n + \ type: object\n type: object\n etcdV3CompactionPeriod:\n + \ description: 'EtcdV3CompactionPeriod is the period between etcdv3\n + \ compaction requests. Set to 0 to disable. [Default: 10m]'\n + \ type: string\n healthChecks:\n description: + 'HealthChecks enables or disables support for health\n checks + [Default: Enabled]'\n type: string\n logSeverityScreen:\n + \ description: 'LogSeverityScreen is the log severity above which + logs\n are sent to the stdout. [Default: Info]'\n type: + string\n prometheusMetricsPort:\n description: + 'PrometheusMetricsPort is the TCP port that the Prometheus\n metrics + server should bind to. Set to 0 to disable. [Default: 9094]'\n type: + integer\n required:\n - controllers\n type: + object\n status:\n description: KubeControllersConfigurationStatus + represents the status\n of the configuration. It's useful for admins + to be able to see the actual\n config that was applied, which can + be modified by environment variables\n on the kube-controllers + process.\n properties:\n environmentVars:\n additionalProperties:\n + \ type: string\n description: EnvironmentVars + contains the environment variables on\n the kube-controllers + that influenced the RunningConfig.\n type: object\n runningConfig:\n + \ description: RunningConfig contains the effective config that + is running\n in the kube-controllers pod, after merging the + API resource with\n any environment variables.\n properties:\n + \ controllers:\n description: Controllers + enables and configures individual Kubernetes\n controllers\n + \ properties:\n namespace:\n description: + Namespace enables and configures the namespace\n controller. + Enabled by default, set to nil to disable.\n properties:\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform\n reconciliation + with the Calico datastore. [Default:\n 5m]'\n type: + string\n type: object\n node:\n + \ description: Node enables and configures the node controller.\n + \ Enabled by default, set to nil to disable.\n properties:\n + \ hostEndpoint:\n description: + HostEndpoint controls syncing nodes to host\n endpoints. + Disabled by default, set to nil to disable.\n properties:\n + \ autoCreate:\n description: + 'AutoCreate enables automatic creation\n of host + endpoints for every node. 
[Default: Disabled]'\n type: + string\n type: object\n leakGracePeriod:\n + \ description: 'LeakGracePeriod is the period used + by the\n controller to determine if an IP address + has been leaked.\n Set to 0 to disable IP garbage + collection. [Default:\n 15m]'\n type: + string\n reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform\n reconciliation + with the Calico datastore. [Default:\n 5m]'\n type: + string\n syncLabels:\n description: + 'SyncLabels controls whether to copy Kubernetes\n node + labels to Calico nodes. [Default: Enabled]'\n type: + string\n type: object\n policy:\n + \ description: Policy enables and configures the policy + controller.\n Enabled by default, set to nil to disable.\n + \ properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to + perform\n reconciliation with the Calico datastore. + [Default:\n 5m]'\n type: + string\n type: object\n serviceAccount:\n + \ description: ServiceAccount enables and configures the + service\n account controller. Enabled by default, set + to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to + perform\n reconciliation with the Calico datastore. + [Default:\n 5m]'\n type: + string\n type: object\n workloadEndpoint:\n + \ description: WorkloadEndpoint enables and configures + the workload\n endpoint controller. Enabled by default, + set to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to + perform\n reconciliation with the Calico datastore. + [Default:\n 5m]'\n type: + string\n type: object\n type: object\n + \ etcdV3CompactionPeriod:\n description: + 'EtcdV3CompactionPeriod is the period between etcdv3\n compaction + requests. Set to 0 to disable. [Default: 10m]'\n type: string\n + \ healthChecks:\n description: 'HealthChecks + enables or disables support for health\n checks [Default: + Enabled]'\n type: string\n logSeverityScreen:\n + \ description: 'LogSeverityScreen is the log severity above + which\n logs are sent to the stdout. [Default: Info]'\n type: + string\n prometheusMetricsPort:\n description: + 'PrometheusMetricsPort is the TCP port that the Prometheus\n metrics + server should bind to. Set to 0 to disable. [Default:\n 9094]'\n + \ type: integer\n required:\n - + controllers\n type: object\n type: object\n type: + object\n served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: networkpolicies.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: NetworkPolicy\n listKind: NetworkPolicyList\n plural: + networkpolicies\n singular: networkpolicy\n scope: Namespaced\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n properties:\n egress:\n description: + The ordered set of egress rules. Each rule contains\n a set + of packet match criteria and a corresponding action to apply.\n items:\n + \ description: \"A Rule encapsulates a set of match criteria + and an\n action. Both selector-based security Policy and security + Profiles\n reference rules - separated out as a list of rules + for both ingress\n and egress packet matching. \\n Each positive + match criteria has\n a negated version, prefixed with ”Not”. + All the match criteria\n within a rule must be satisfied for + a packet to match. A single\n rule can contain the positive + and negative version of a match\n and both must be satisfied + for the rule to match.\"\n properties:\n action:\n + \ type: string\n destination:\n description: + Destination contains the match criteria that apply\n to + destination entity.\n properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. 
\\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. 
Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). + \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. 
\\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n ingress:\n description: The ordered set + of ingress rules. Each rule contains\n a set of packet match + criteria and a corresponding action to apply.\n items:\n description: + \"A Rule encapsulates a set of match criteria and an\n action. + \ Both selector-based security Policy and security Profiles\n reference + rules - separated out as a list of rules for both ingress\n and + egress packet matching. \\n Each positive match criteria has\n a + negated version, prefixed with ”Not”. All the match criteria\n within + a rule must be satisfied for a packet to match. A single\n rule + can contain the positive and negative version of a match\n and + both must be satisfied for the rule to match.\"\n properties:\n + \ action:\n type: string\n destination:\n + \ description: Destination contains the match criteria that + apply\n to destination entity.\n properties:\n + \ namespaceSelector:\n description: + \"NamespaceSelector is an optional field that\n contains + a selector expression. Only traffic that originates\n from + (or terminates at) endpoints within the selected\n namespaces + will be matched. When both NamespaceSelector\n and + Selector are defined on the same rule, then only workload\n endpoints + that are matched by both selectors will be selected\n by + the rule. 
\\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. 
This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). + \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. 
+ Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. 
\\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n order:\n description: Order is an optional + field that specifies the order in\n which the policy is applied. + Policies with higher \"order\" are applied\n after those with + lower order. If the order is omitted, it may be\n considered + to be \"infinite\" - i.e. the policy will be applied last. Policies\n with + identical order will be applied in alphanumerical order based\n on + the Policy \"Name\".\n type: number\n selector:\n + \ description: \"The selector is an expression used to pick pick + out\n the endpoints that the policy should be applied to. \\n + Selector\n expressions follow this syntax: \\n \\tlabel == \\\"string_literal\\\"\n + \ \\ -> comparison, e.g. my_label == \\\"foo bar\\\" \\tlabel + != \\\"string_literal\\\"\n \\ -> not equal; also matches if + label is not present \\tlabel in\n { \\\"a\\\", \\\"b\\\", \\\"c\\\", + ... } -> true if the value of label X is\n one of \\\"a\\\", + \\\"b\\\", \\\"c\\\" \\tlabel not in { \\\"a\\\", \\\"b\\\", \\\"c\\\",\n ... + } -> true if the value of label X is not one of \\\"a\\\", \\\"b\\\",\n \\\"c\\\" + \\thas(label_name) -> True if that label is present \\t! expr\n -> + negation of expr \\texpr && expr -> Short-circuit and \\texpr\n || + expr -> Short-circuit or \\t( expr ) -> parens for grouping \\tall()\n or + the empty selector -> matches all endpoints. \\n Label names are\n allowed + to contain alphanumerics, -, _ and /. String literals are\n more + permissive but they do not support escape characters. \\n Examples\n (with + made-up labels): \\n \\ttype == \\\"webserver\\\" && deployment\n == + \\\"prod\\\" \\ttype in {\\\"frontend\\\", \\\"backend\\\"} \\tdeployment !=\n + \ \\\"dev\\\" \\t! has(label_name)\"\n type: + string\n serviceAccountSelector:\n description: + ServiceAccountSelector is an optional field for an expression\n used + to select a pod based on service accounts.\n type: string\n types:\n + \ description: \"Types indicates whether this policy applies to + ingress,\n or to egress, or to both. When not explicitly specified + (and so\n the value on creation is empty or nil), Calico defaults + Types according\n to what Ingress and Egress are present in the + policy. 
The default\n is: \\n - [ PolicyTypeIngress ], if there + are no Egress rules (including\n the case where there are also + no Ingress rules) \\n - [ PolicyTypeEgress\n ], if there are + Egress rules but no Ingress rules \\n - [ PolicyTypeIngress,\n PolicyTypeEgress + ], if there are both Ingress and Egress rules.\n \\n When the + policy is read back again, Types will always be one\n of these + values, never empty or nil.\"\n items:\n description: + PolicyType enumerates the possible values of the PolicySpec\n Types + field.\n type: string\n type: array\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: networksets.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: NetworkSet\n listKind: NetworkSetList\n plural: networksets\n + \ singular: networkset\n scope: Namespaced\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n description: NetworkSet is the Namespaced-equivalent + of the GlobalNetworkSet.\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: NetworkSetSpec contains the specification + for a NetworkSet\n resource.\n properties:\n nets:\n + \ description: The list of IP networks that belong to this set.\n + \ items:\n type: string\n type: + array\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n---\n# Source: calico/templates/calico-kube-controllers-rbac.yaml\n\n# + Include a clusterrole for the kube-controllers component,\n# and bind it to the + calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n + \ name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for + deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n + \ - watch\n - list\n - get\n # Pods are watched to check for existence + as part of IPAM controller.\n - apiGroups: [\"\"]\n resources:\n - pods\n + \ verbs:\n - get\n - list\n - watch\n # IPAM resources are manipulated + when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n + \ - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n + \ verbs:\n - get\n - list\n - create\n - update\n - + delete\n - watch\n # kube-controllers manages hostendpoints.\n - apiGroups: + [\"crd.projectcalico.org\"]\n resources:\n - hostendpoints\n verbs:\n + \ - get\n - list\n - create\n - update\n - delete\n # + Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - clusterinformations\n verbs:\n - get\n - + create\n - update\n # KubeControllersConfiguration is where it gets its + config\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - kubecontrollersconfigurations\n + \ verbs:\n # read its own config\n - get\n # create a default + if none exists\n - create\n # update status\n - update\n # + watch for changes\n - watch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n + \ name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n + \ kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n - kind: ServiceAccount\n + \ name: calico-kube-controllers\n namespace: kube-system\n---\n\n---\n# Source: + calico/templates/calico-node-rbac.yaml\n# Include a clusterrole for the calico-node + DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: + rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The + CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n + \ resources:\n - pods\n - nodes\n - namespaces\n verbs:\n + \ - get\n # EndpointSlices are used for Service-based network policy rule\n + \ # enforcement.\n - apiGroups: [\"discovery.k8s.io\"]\n resources:\n - + endpointslices\n verbs:\n - watch\n - list\n - apiGroups: [\"\"]\n + \ resources:\n - endpoints\n - services\n verbs:\n # Used + to discover service IPs for advertisement.\n - watch\n - list\n # + Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm + needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n + \ verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n + \ verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - 
+ patch\n # Calico stores some configuration information in node annotations.\n + \ - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: + [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n + \ - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: + [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n + \ verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n + \ - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - + patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - globalfelixconfigs\n - felixconfigurations\n - + bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n + \ - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n + \ - networkpolicies\n - networksets\n - clusterinformations\n - + hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n + \ - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: + [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n + \ - clusterinformations\n verbs:\n - create\n - update\n # Calico + stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n + \ - nodes\n verbs:\n - get\n - list\n - watch\n # These + permissions are only required for upgrade from v2.6, and can\n # be removed after + upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - + create\n - update\n # These permissions are required for Calico CNI to perform + IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n + \ - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n + \ - get\n - list\n - create\n - update\n - delete\n - + apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n + \ verbs:\n - get\n # Block affinities must also be watchable by confd + for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n + \ - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration + needs to get daemonsets. 
These permissions can be\n # removed if not upgrading + from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n + \ - daemonsets\n verbs:\n - get\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: + ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n + \ kind: ClusterRole\n name: calico-node\nsubjects:\n - kind: ServiceAccount\n + \ name: calico-node\n namespace: kube-system\n\n---\n# Source: calico/templates/calico-node.yaml\n# + This manifest installs the calico-node container, as well\n# as the CNI plugins + and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: + DaemonSet\napiVersion: apps/v1\nmetadata:\n name: calico-node\n namespace: kube-system\n + \ labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: + calico-node\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n + \ maxUnavailable: 1\n template:\n metadata:\n labels:\n k8s-app: + calico-node\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n + \ hostNetwork: true\n tolerations:\n # Make sure calico-node gets + scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n + \ # Mark the pod as a critical add-on for rescheduling.\n - key: + CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n + \ operator: Exists\n serviceAccountName: calico-node\n # Minimize + downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n + \ # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n + \ terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n + \ initContainers:\n # This container performs upgrade from host-local + IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, + or if you have already\n # upgraded to use calico-ipam.\n - name: + upgrade-ipam\n image: calico/cni:v3.20.0\n command: [\"/opt/cni/bin/calico-ipam\", + \"-upgrade\"]\n envFrom:\n - configMapRef:\n # + Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for + eBPF mode.\n name: kubernetes-services-endpoint\n optional: + true\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n + \ fieldRef:\n fieldPath: spec.nodeName\n - + name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config\n key: calico_backend\n + \ volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: + host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: + cni-bin-dir\n securityContext:\n privileged: true\n # + This container installs the CNI binaries\n # and CNI network config file + on each node.\n - name: install-cni\n image: calico/cni:v3.20.0\n + \ command: [\"/opt/cni/bin/install\"]\n envFrom:\n - + configMapRef:\n # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT + to be overridden for eBPF mode.\n name: kubernetes-services-endpoint\n + \ optional: true\n env:\n # Name of the CNI + config file to create.\n - name: CNI_CONF_NAME\n value: + \"10-calico.conflist\"\n # The CNI network config to install on each + node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config\n key: cni_network_config\n + \ # Set the hostname based on the k8s node name.\n - name: + KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: + spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config\n key: veth_mtu\n # Prevents the container + from sleeping forever.\n - name: SLEEP\n value: \"false\"\n + 
\ volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: + cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: + cni-net-dir\n securityContext:\n privileged: true\n # + Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n + \ # to communicate with Felix over the Policy Sync API.\n - name: + flexvol-driver\n image: calico/pod2daemon-flexvol:v3.20.0\n volumeMounts:\n + \ - name: flexvol-driver-host\n mountPath: /host/driver\n + \ securityContext:\n privileged: true\n containers:\n + \ # Runs calico-node container on each Kubernetes node. This\n # + container programs network policy and routes on each\n # host.\n - + name: calico-node\n image: calico/node:v3.20.0\n envFrom:\n + \ - configMapRef:\n # Allow KUBERNETES_SERVICE_HOST and + KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.\n name: + kubernetes-services-endpoint\n optional: true\n env:\n + \ # Use Kubernetes API as the backing datastore.\n - name: + DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the + datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n + \ # Set based on the k8s node name.\n - name: NODENAME\n + \ valueFrom:\n fieldRef:\n fieldPath: + spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config\n key: calico_backend\n # Cluster type + to identify the deployment type\n - name: CLUSTER_TYPE\n value: + \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: + IP\n value: \"autodetect\"\n # Enable VXLAN\n - + name: CALICO_IPV4POOL_VXLAN\n value: \"Always\"\n # Set + MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config\n key: veth_mtu\n # Set MTU for the + VXLAN tunnel device.\n - name: FELIX_VXLANMTU\n valueFrom:\n + \ configMapKeyRef:\n name: calico-config\n key: + veth_mtu\n # Set MTU for the Wireguard tunnel device.\n - + name: FELIX_WIREGUARDMTU\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config\n key: veth_mtu\n # + The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # + chosen from this range. Changing this value after installation will have\n # + no effect. 
This should fall within `--cluster-cidr`.\n # - name: CALICO_IPV4POOL_CIDR\n + \ # value: \"192.168.0.0/16\"\n # Disable file logging + so `kubectl logs` works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: + \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n + \ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n + \ # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n + \ value: \"false\"\n - name: FELIX_FEATUREDETECTOVERRIDE\n + \ value: \"ChecksumOffloadBroken=true\"\n - name: FELIX_HEALTHENABLED\n + \ value: \"true\"\n securityContext:\n privileged: + true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n + \ exec:\n command:\n - /bin/calico-node\n + \ - -felix-live\n periodSeconds: 10\n initialDelaySeconds: + 10\n failureThreshold: 6\n readinessProbe:\n exec:\n + \ command:\n - /bin/calico-node\n - + -felix-ready\n periodSeconds: 10\n volumeMounts:\n - + mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n readOnly: + false\n - mountPath: /lib/modules\n name: lib-modules\n + \ readOnly: true\n - mountPath: /run/xtables.lock\n name: + xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n + \ name: var-run-calico\n readOnly: false\n - + mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: + false\n - name: policysync\n mountPath: /var/run/nodeagent\n + \ # For eBPF mode, we need to be able to mount the BPF filesystem at + /sys/fs/bpf so we mount in the\n # parent directory.\n - + name: sysfs\n mountPath: /sys/fs/\n # Bidirectional + means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to + the host.\n # If the host is known to mount that filesystem already + then Bidirectional can be omitted.\n mountPropagation: Bidirectional\n + \ - name: cni-log-dir\n mountPath: /var/log/calico/cni\n + \ readOnly: true\n volumes:\n # Used by calico-node.\n + \ - name: lib-modules\n hostPath:\n path: /lib/modules\n + \ - name: var-run-calico\n hostPath:\n path: /var/run/calico\n + \ - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n + \ - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n + \ type: FileOrCreate\n - name: sysfs\n hostPath:\n path: + /sys/fs/\n type: DirectoryOrCreate\n # Used to install CNI.\n + \ - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n + \ - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n + \ # Used to access CNI logs.\n - name: cni-log-dir\n hostPath:\n + \ path: /var/log/calico/cni\n # Mount in the directory for host-local + IPAM allocations. 
This is\n # used when upgrading from host-local to calico-ipam, + and can be removed\n # if not using the upgrade-ipam init container.\n + \ - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n + \ # Used to create per-pod Unix Domain Sockets\n - name: policysync\n + \ hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n + \ # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n + \ hostPath:\n type: DirectoryOrCreate\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n---\n\napiVersion: + v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n\n---\n# + Source: calico/templates/calico-kube-controllers.yaml\n# See https://github.com/projectcalico/kube-controllers\napiVersion: + apps/v1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: + kube-system\n labels:\n k8s-app: calico-kube-controllers\nspec:\n # The controllers + can only have a single active instance.\n replicas: 1\n selector:\n matchLabels:\n + \ k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n + \ metadata:\n name: calico-kube-controllers\n namespace: kube-system\n + \ labels:\n k8s-app: calico-kube-controllers\n spec:\n nodeSelector:\n + \ kubernetes.io/os: linux\n tolerations:\n # Mark the pod as + a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: + Exists\n - key: node-role.kubernetes.io/master\n effect: NoSchedule\n + \ serviceAccountName: calico-kube-controllers\n priorityClassName: system-cluster-critical\n + \ containers:\n - name: calico-kube-controllers\n image: calico/kube-controllers:v3.20.0\n + \ env:\n # Choose which controllers to run.\n - + name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n + \ value: kubernetes\n livenessProbe:\n exec:\n + \ command:\n - /usr/bin/check-status\n - + -l\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: + 6\n timeoutSeconds: 10\n readinessProbe:\n exec:\n + \ command:\n - /usr/bin/check-status\n - + -r\n periodSeconds: 10\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n + \ name: calico-kube-controllers\n namespace: kube-system\n\n---\n\n# This manifest + creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler + to evict\n\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n + \ name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: + calico-kube-controllers\nspec:\n maxUnavailable: 1\n selector:\n matchLabels:\n + \ k8s-app: calico-kube-controllers\n---\n# Source: calico/templates/calico-etcd-secrets.yaml\n\n---\n# + Source: calico/templates/calico-typha.yaml\n\n---\n# Source: calico/templates/configure-canal.yaml\n" +kind: ConfigMap +metadata: + creationTimestamp: null + name: calico-crs-configmap + namespace: default diff --git a/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/calico-crs.yaml b/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/calico-crs.yaml new file mode 100644 index 0000000000..acfe874639 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/calico-crs.yaml @@ -0,0 +1,13 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha3 +kind: ClusterResourceSet +metadata: + name: calico-crs + namespace: default +spec: + clusterSelector: + matchLabels: + cni: calico + resources: + - kind: ConfigMap + name: calico-crs-configmap + diff --git 
a/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml b/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml new file mode 100644 index 0000000000..a0182960b8 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml @@ -0,0 +1,37 @@ +apiVersion: capi.weave.works/v1alpha1 +kind: ClusterBootstrapConfig +metadata: + name: capi-gitops + namespace: default +spec: + clusterSelector: + matchLabels: + weave.works/capi: bootstrap + jobTemplate: + generateName: "run-gitops-{{ .ObjectMeta.Name }}" + spec: + containers: + - image: ghcr.io/fluxcd/flux-cli:v0.29.5 + name: flux-bootstrap + resources: {} + volumeMounts: + - name: kubeconfig + mountPath: "/etc/gitops" + readOnly: true + args: + [ + "bootstrap", + "github", + "--kubeconfig=/etc/gitops/value", + "--owner=$GITHUB_USER", + "--repository=fleet-infra", + "--path=./clusters/{{ .ObjectMeta.Namespace }}/{{ .ObjectMeta.Name }}", + ] + envFrom: + - secretRef: + name: my-pat + restartPolicy: Never + volumes: + - name: kubeconfig + secret: + secretName: "{{ .ObjectMeta.Name }}-kubeconfig" diff --git a/website/versioned_docs/version-0.24.0/cluster-management/assets/profiles/profile-repo.yaml b/website/versioned_docs/version-0.24.0/cluster-management/assets/profiles/profile-repo.yaml new file mode 100644 index 0000000000..9e427fdb87 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/assets/profiles/profile-repo.yaml @@ -0,0 +1,9 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: weaveworks-charts + namespace: flux-system +spec: + interval: 1m + url: https://weaveworks.github.io/weave-gitops-profile-examples/ +status: {} diff --git a/website/versioned_docs/version-0.24.0/cluster-management/assets/rbac/wego-admin.yaml b/website/versioned_docs/version-0.24.0/cluster-management/assets/rbac/wego-admin.yaml new file mode 100644 index 0000000000..54fdc43f79 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/assets/rbac/wego-admin.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wego-admin-cluster-role-binding +subjects: + - kind: User + name: wego-admin + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: wego-admin-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: wego-admin-cluster-role +rules: + - apiGroups: [""] + resources: ["secrets", "pods"] + verbs: ["get", "list"] + - apiGroups: ["apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "list"] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: ["kustomizations"] + verbs: ["get", "list", "patch"] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: ["helmreleases"] + verbs: ["get", "list", "patch"] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: ["get", "list", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - apiGroups: ["pac.weave.works"] + resources: ["policies"] + verbs: ["get", "list"] diff --git a/website/versioned_docs/version-0.24.0/cluster-management/cluster-api-providers.mdx b/website/versioned_docs/version-0.24.0/cluster-management/cluster-api-providers.mdx new file 
mode 100644 index 0000000000..cda145071e --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/cluster-api-providers.mdx @@ -0,0 +1,39 @@ +--- +title: Cluster API Providers +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+
+## Creating leaf clusters
+
+To enable leaf cluster creation, Weave GitOps leverages the Cluster-API (CAPI) providers for [AWS](https://cluster-api-aws.sigs.k8s.io/getting-started.html) or [Docker](https://cluster-api.sigs.k8s.io/user/quick-start.html).
+In this section we cover the steps to deploy the providers on a Kubernetes cluster that is running Weave GitOps.
+
+CAPI provides declarative APIs, controllers, and tooling to manage the lifecycle of Kubernetes clusters, across
+a large number of [infrastructure providers](https://cluster-api.sigs.k8s.io/reference/providers.html#infrastructure).
+The CAPI custom resource definitions are platform independent as each provider implementation handles the creation of VMs,
+VPCs, networks and other required infrastructure parts, enabling consistent and repeatable cluster deployments.
+For more information on the CAPI project, refer to the [CAPI book](https://cluster-api.sigs.k8s.io/introduction.html).
+
+## Configure and deploy the CAPI providers
+
+In all cases, CAPI requires kubectl access to an existing Kubernetes cluster, so in our case we configure `kubectl` to use the management cluster.
+
+```bash
+export KUBECONFIG=/path/to/kubeconfig
+```
+
+## AWS provider (CAPA)
+
+After having configured `kubectl`, to deploy the CAPA components, follow the steps at https://cluster-api-aws.sigs.k8s.io/getting-started.html#install-clusterctl
+
+## Docker provider (CAPD)
+
+The Docker infrastructure provider is a reference implementation and is a practical way of testing the Weave GitOps cluster creation feature. It is not intended for production clusters. As CAPD will start Docker containers in the host nodes of the management cluster, note that if you are using it with a `kind` cluster you'll need to mount the Docker socket as described in the [Install and/or configure a kubernetes cluster](https://cluster-api-aws.sigs.k8s.io/getting-started.html#install-andor-configure-a-kubernetes-cluster) kind section.
+
+Similar to the AWS provider case, configure `kubectl` to use the management cluster and then, to deploy the CAPD components, follow the steps at https://cluster-api-aws.sigs.k8s.io/getting-started.html#install-clusterctl.
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/deleting-a-cluster.mdx b/website/versioned_docs/version-0.24.0/cluster-management/deleting-a-cluster.mdx
new file mode 100644
index 0000000000..6ca6bdff47
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/cluster-management/deleting-a-cluster.mdx
@@ -0,0 +1,21 @@
+---
+title: Deleting a Cluster
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# Deleting a Cluster
+
+### How to: delete a cluster using the UI
+
+- Select the clusters you want to delete
+- Press the `CREATE A PR TO DELETE CLUSTERS` button
+- Update the deletion PR values or leave the default values
+- Press the `Remove clusters` button
+- Merge the created PR to delete the clusters
+
+### Notes
+
+A current limitation is the inability to apply an _empty_ repository to a cluster. If you have CAPI clusters and other manifests committed to this repository, and then _delete all of them_ so there are 0 manifests left, then the apply will fail and the resources will not be removed from the cluster.
+A workaround is to add a dummy _ConfigMap_ back to the git repository after deleting everything else so that there is at least 1 manifest to apply.
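+For illustration, a minimal placeholder _ConfigMap_ along these lines is enough to keep the apply non-empty (the name and namespace here are arbitrary, not required values):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  # Any name/namespace works; the object exists only so the repository is not empty.
+  name: placeholder
+  namespace: default
+data: {}
+```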
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/disable-capi.mdx b/website/versioned_docs/version-0.24.0/cluster-management/disable-capi.mdx
new file mode 100644
index 0000000000..a74c0595d5
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/cluster-management/disable-capi.mdx
@@ -0,0 +1,52 @@
+---
+title: Disabling CAPI Support
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# Disabling CAPI Support
+
+If you do not need CAPI-based Cluster Management support, you can disable CAPI
+via the Helm Chart values.
+
+Update your Weave GitOps Enterprise `HelmRelease` object with the
+`global.capiEnabled` value set to `false`:
+
+```yaml {33-35} title='clusters/management/weave-gitops-enterprise.yaml'
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+  name: weave-gitops-enterprise-charts
+  namespace: flux-system
+spec:
+  interval: 60m
+  secretRef:
+    name: weave-gitops-enterprise-credentials
+  url: https://charts.dev.wkp.weave.works/releases/charts-v3
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+  name: weave-gitops-enterprise
+  namespace: flux-system
+spec:
+  chart:
+    spec:
+      interval: 65m
+      chart: mccp
+      sourceRef:
+        kind: HelmRepository
+        name: weave-gitops-enterprise-charts
+        namespace: flux-system
+      version: 0.12.0
+  install:
+    crds: CreateReplace
+  upgrade:
+    crds: CreateReplace
+  interval: 50m
+  values:
+    global:
+      capiEnabled: false
+```
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/getting-started.mdx b/website/versioned_docs/version-0.24.0/cluster-management/getting-started.mdx
new file mode 100644
index 0000000000..18e74b7cc0
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/cluster-management/getting-started.mdx
@@ -0,0 +1,259 @@
+---
+title: Getting started
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+import CodeBlock from "@theme/CodeBlock";
+import BrowserOnly from "@docusaurus/BrowserOnly";
+
+# Getting started
+
+## Creating your first CAPD Cluster
+
+If you've followed the [Installation guide](installation/weave-gitops-enterprise/index.mdx), you should have:
+
+1. Weave GitOps Enterprise installed
+2. A CAPI provider installed (with support for `ClusterResourceSet`s enabled).
+
+Next up we'll add a template and use it to create a cluster.
+
+### Directory structure
+
+Let's set up a directory structure to manage our clusters:
+
+```bash
+mkdir -p clusters/bases \
+    clusters/management/capi/templates \
+    clusters/management/capi/bootstrap \
+    clusters/management/capi/profiles
+```
+
+Now we should have:
+
+```bash
+.
+└── clusters
+    ├── bases
+    └── management
+        └── capi
+            ├── bootstrap
+            ├── profiles
+            └── templates
+```
+
+This assumes that we've configured Flux to reconcile everything in `clusters/management` into our management cluster.
+
+To keep things organized we've created some subpaths for the different resources:
+
+- `bases` for any common resources between clusters like RBAC and policy.
+- `templates` for `GitOpsTemplates`
+- `bootstrap` for `ClusterBootstrapConfig`, `ClusterResourceSet` and the `ConfigMap` they reference
+- `profiles` for the `HelmRepository` of the profiles for the newly created clusters
+
+Let's grab some sample resources to create our first cluster!
+
+### Add common RBAC to the repo
+
+When a cluster is provisioned, by default it will reconcile all the manifests in `./clusters/<cluster-namespace>/<cluster-name>/` and `./clusters/bases`.
+ +To display Applications and Sources in the UI we need to give the logged in user permissions to inspect the new cluster. + +Adding common rbac rules to `./clusters/bases/rbac` is an easy way to configure this! + +import WegoAdmin from "!!raw-loader!./assets/rbac/wego-admin.yaml"; + + + {() => ( + + curl -o clusters/bases/rbac/wego-admin.yaml {window.location.protocol}// + {window.location.host} + {require("./assets/rbac/wego-admin.yaml").default} + + )} + + +
Expand to see full template yaml + + + {WegoAdmin} + + +
+ +### Add a template + +See [CAPI Templates](../gitops-templates/intro.mdx) page for more details on this topic. Once we load a template we can use it in the UI to create clusters! + +import CapdTemplate from "!!raw-loader!../assets/templates/capd-template.yaml"; + +Download the template below to your config repository path, then commit and push to your git origin. + + + {() => ( + + curl -o clusters/management/capi/templates/capd-template.yaml{" "} + {window.location.protocol}//{window.location.host} + {require("../assets/templates/capd-template.yaml").default} + + )} + + +
Expand to see full template yaml + + + {CapdTemplate} + + +
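+The "commit and push" step above is plain git; assuming the repository root is the config repository and `main` is the branch Flux reconciles (both assumptions for illustration), it would look roughly like:
+
+```bash
+# Stage the downloaded template, commit it, and push to the git origin.
+git add clusters/management/capi/templates/capd-template.yaml
+git commit -m "Add CAPD cluster template"
+git push origin main
+```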
+ +## Automatically install a CNI with `ClusterResourceSet`s + +We can use `ClusterResourceSet`s to automatically install CNI's on a new cluster, here we use calico as an example. + +### Add a CRS to install a CNI + +Create a calico configmap and a CRS as follows: + +import CalicoCRS from "!!raw-loader!./assets/bootstrap/calico-crs.yaml"; + + + {() => ( + + curl -o clusters/management/capi/bootstrap/calico-crs.yaml{" "} + {window.location.protocol}//{window.location.host} + {require("./assets/bootstrap/calico-crs.yaml").default} + {"\n"} + curl -o clusters/management/capi/bootstrap/calico-crs-configmap.yaml { + window.location.protocol + }//{window.location.host} + {require("./assets/bootstrap/calico-crs-configmap.yaml").default} + + )} + + + + {CalicoCRS} + + +The full [`calico-crs-configmap.yaml`](./assets/bootstrap/calico-crs-configmap.yaml) is a bit large to display inline here but make sure to download it to `clusters/management/capi/bootstrap/calico-crs-configmap.yaml` too, manually or with the above `curl` command. + +## Profiles and clusters + +WGE can automatically install profiles onto new clusters + +#### Add a helmrepo + +import ProfileRepo from "!!raw-loader!./assets/profiles/profile-repo.yaml"; + +Download the profile repository below to your config repository path then commit and push. Make sure to update the url to point to a Helm repository containing your profiles. + + + {() => ( + + curl -o clusters/management/capi/profiles/profile-repo.yaml{" "} + {window.location.protocol} + //{window.location.host} + {require("./assets/profiles/profile-repo.yaml").default} + + )} + + + + {ProfileRepo} + + +For more information about profiles, see [profiles from private helm repositories](./profiles.mdx/#optional-using-a-helm-chart-from-a-remote-publicprivate-repository), [policy profiles](../policy/weave-policy-profile.mdx), and [eso secrets profiles](../secrets/setup-eso.mdx). + +#### Add a cluster bootstrap config + +Create a cluster bootstrap config as follows: + +```bash + kubectl create secret generic my-pat --from-literal GITHUB_TOKEN=$GITHUB_TOKEN +``` + +import CapiGitopsCDC from "!!raw-loader!./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml"; + +Download the config with + + + {() => ( + + curl -o + clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml{" "} + {window.location.protocol} + //{window.location.host} + { + require("./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml") + .default + } + + )} + + +Then update the `GITOPS_REPO` variable to point to your cluster + +
Expand to see full yaml + + + {CapiGitopsCDC} + + +
+
+#### Add Monitoring Dashboards to your cluster
+
+In order to add dashboards to your cluster, you'll need to use metadata annotations following the pattern below.
+
+```
+apiVersion: gitops.weave.works/v1alpha1
+kind: GitopsCluster
+metadata:
+  annotations:
+    metadata.weave.works/dashboard.grafana: https://grafana.com/
+    metadata.weave.works/dashboard.prometheus: https://prometheus.io/
+```
+
+#### Specifying CAPI cluster kinds
+
+To explicitly specify the type of cluster, add a metadata annotation with `weave.works/cluster-kind` as the annotation key, following the pattern below:
+
+```
+apiVersion: gitops.weave.works/v1alpha1
+kind: GitopsCluster
+metadata:
+  annotations:
+    weave.works/cluster-kind: <CLUSTER_KIND>
+```
+where **CLUSTER_KIND** can be one of the following supported kinds:
+  - DockerCluster
+  - AWSCluster
+  - AWSManagedCluster
+  - AzureCluster
+  - AzureManagedCluster
+  - GCPCluster
+  - MicrovmCluster
+  - Rancher
+  - Openshift
+  - Tanzu
+  - OtherOnprem
+
+## Test
+
+You should now be able to create a new cluster from your template and install profiles onto it with a single Pull Request via the WGE UI!
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/gitrepo-selection.mdx b/website/versioned_docs/version-0.24.0/cluster-management/gitrepo-selection.mdx
new file mode 100644
index 0000000000..0d39ae7f5c
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/cluster-management/gitrepo-selection.mdx
@@ -0,0 +1,67 @@
+---
+title: Git Repositories and Resources
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# Git Repositories and Resources
+
+During pull request creation, to authenticate using Git you will need to select the git repository where the pull request will be created.
+
+## The default git repository selected in the UI
+
+Depending on the action performed on the resource (creation/deletion/editing), the default git repository selected in the UI is determined in the following order:
+
+1. the repository used to initially create the resource, found in the `templates.weave.works/create-request` annotation (in the case of editing or deleting resources)
+   ```yaml
+   metadata:
+     annotations:
+       templates.weave.works/create-request: "{...\"parameter_values\":{...\"url\":\"https://github.com/weave-example-org/weave-demo\"}"
+   ```
+
+2. the first repository found with a `weave.works/repo-role: default` annotation
+   ```yaml
+   metadata:
+     annotations:
+       weave.works/repo-role: default
+   ```
+
+3. the flux-system repository
+   ```yaml
+   metadata:
+     name: flux-system
+     namespace: flux-system
+   ```
+
+4. the first repository in the list of git repositories that the user has access to
+
+In the case of deletion and editing, if the resource repository is found amongst the git repositories that the user has access to, it will be preselected and the selection will be disabled. If it is not found, the user will be able to choose a new repository.
+
+In the case of tenants, it is recommended that the `weave.works/repo-role: default` annotation is added to an appropriate git repository.
+
+## Overriding the calculated git repository HTTPS URL
+
+The system will try and automatically calculate the correct HTTPS API endpoint to create a Pull Request against. For example, if the git repository URL is `ssh://git@github.com/org/repo.git`, the system will try and convert it to `https://github.com/org/repo.git`.
+
+However, it is not always possible to accurately derive this URL. An override can be specified to set the correct URL instead.
For example the ssh url may be `ssh://git@interal-ssh-server:2222/org/repo.git` and the correct HTTPS url may be `https://gitlab.example.com/org/repo.git`. + +In this case we set the override via the `weave.works/repo-https-url` annotation on the `GitRepository` object: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: repo + namespace: flux-system + annotations: + // highlight-start + weave.works/repo-https-url: https://gitlab.example.com/org/repo.git + // highlight-end +spec: + interval: 1m + url: ssh://git@interal-ssh-server:2222/org/repo.git +``` + +The pull request will then be created against the correct HTTPS API. + +The above also applies to the creation of applications. diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-btn.png b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-btn.png new file mode 100644 index 0000000000..e4efad3c97 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-btn.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-form.png b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-form.png new file mode 100644 index 0000000000..35abd63d05 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-form.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-helm-release.png b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-helm-release.png new file mode 100644 index 0000000000..3405d63876 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-helm-release.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-kustomization.png b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-kustomization.png new file mode 100644 index 0000000000..fdd1fab580 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-kustomization.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-select-source.png b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-select-source.png new file mode 100644 index 0000000000..ce3998e4bc Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/add-application-select-source.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/disconnect-cluster.png b/website/versioned_docs/version-0.24.0/cluster-management/img/disconnect-cluster.png new file mode 100644 index 0000000000..5a08b5afbc Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/disconnect-cluster.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/identity-selection.png b/website/versioned_docs/version-0.24.0/cluster-management/img/identity-selection.png new file mode 100644 index 0000000000..c1ca94f155 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/identity-selection.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/img/profile-selection.png b/website/versioned_docs/version-0.24.0/cluster-management/img/profile-selection.png new file mode 100644 index 0000000000..4f0243b070 Binary files 
/dev/null and b/website/versioned_docs/version-0.24.0/cluster-management/img/profile-selection.png differ diff --git a/website/versioned_docs/version-0.24.0/cluster-management/intro.mdx b/website/versioned_docs/version-0.24.0/cluster-management/intro.mdx new file mode 100644 index 0000000000..71023b39a6 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/intro.mdx @@ -0,0 +1,14 @@ +--- +title: Introduction +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+
+## Cluster management
+
+Weave GitOps Enterprise (WGE) can provision Kubernetes clusters using any of the CAPI providers installed. The lifecycle management of these clusters is done declaratively via GitOps, and WGE simplifies this process by providing both a Web UI and a CLI to interact with and manage these clusters.
\ No newline at end of file
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/managing-existing-clusters.mdx b/website/versioned_docs/version-0.24.0/cluster-management/managing-existing-clusters.mdx
new file mode 100644
index 0000000000..b40849670c
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/cluster-management/managing-existing-clusters.mdx
@@ -0,0 +1,288 @@
+---
+title: Managing existing clusters
+hide_title: true
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+import TierLabel from "../_components/TierLabel";
+
+# Managing existing clusters
+
+### Managing non-CAPI clusters {#how-to-connect-a-cluster}
+
+Any Kubernetes cluster, whether CAPI or not, can be added to Weave GitOps Enterprise. The only thing we need is a secret containing a valid `kubeconfig`.
+
+import TOCInline from "@theme/TOCInline";
+;
+
+
+
+If you already have a `kubeconfig` stored in a secret in your management cluster, continue below to create a `GitopsCluster`.
+
+If you have a kubeconfig, you can load it into the cluster like so:
+
+```
+kubectl create secret generic demo-01-kubeconfig \
+--from-file=value=./demo-01-kubeconfig
+```
+
+
+
+### How to create a kubeconfig secret using a service account
+
+1. Create a new service account on the remote cluster:
+
+   ```yaml
+   apiVersion: v1
+   kind: ServiceAccount
+   metadata:
+     name: demo-01
+     namespace: default
+   ```
+
+1. Add RBAC permissions for the service account
+
Expand to see role manifests + + ```yaml + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: impersonate-user-groups + subjects: + - kind: ServiceAccount + name: demo-01 + namespace: default + roleRef: + kind: ClusterRole + name: user-groups-impersonator + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: user-groups-impersonator + rules: + - apiGroups: [""] + resources: ["users", "groups"] + verbs: ["impersonate"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + ``` + +
+ + This will allow WGE to introspect the cluster for available namespaces. + + Once we know what namespaces are available we can test whether the logged in user can access them via impersonation. + +1. Get the token of the service account + + First get the list of secrets of the service accounts by running the following command: + + ```sh + kubectl get secrets --field-selector type=kubernetes.io/service-account-token + NAME TYPE DATA AGE + default-token-lsjz4 kubernetes.io/service-account-token 3 13d + demo-01-token-gqz7p kubernetes.io/service-account-token 3 99m + ``` + + `demo-01-token-gqz7p` is the secret that holds the token for `demo-01` service account + + To get the token of the service account run the following command: + + ```sh + TOKEN=$(kubectl get secret demo-01-token-gqz7p -o jsonpath={.data.token} | base64 -d) + ``` + +1. Create a kubeconfig secret + + We'll use a helper script to generate the kubeconfig, save this into `static-kubeconfig.sh`: + +
Expand to see script
+
+   ```bash title="static-kubeconfig.sh"
+   #!/bin/bash
+
+   if [[ -z "$CLUSTER_NAME" ]]; then
+       echo "Ensure CLUSTER_NAME has been set"
+       exit 1
+   fi
+
+   if [[ -z "$CA_CERTIFICATE" ]]; then
+       echo "Ensure CA_CERTIFICATE has been set to the path of the CA certificate"
+       exit 1
+   fi
+
+   if [[ -z "$ENDPOINT" ]]; then
+       echo "Ensure ENDPOINT has been set"
+       exit 1
+   fi
+
+   if [[ -z "$TOKEN" ]]; then
+       echo "Ensure TOKEN has been set"
+       exit 1
+   fi
+
+   export CLUSTER_CA_CERTIFICATE=$(cat "$CA_CERTIFICATE" | base64)
+
+   # Render a kubeconfig that authenticates with the service account token.
+   # The context and user names below are illustrative.
+   envsubst <<EOF
+   apiVersion: v1
+   kind: Config
+   clusters:
+   - cluster:
+       certificate-authority-data: ${CLUSTER_CA_CERTIFICATE}
+       server: https://${ENDPOINT}
+     name: ${CLUSTER_NAME}
+   contexts:
+   - context:
+       cluster: ${CLUSTER_NAME}
+       user: ${CLUSTER_NAME}-sa
+     name: ${CLUSTER_NAME}
+   current-context: ${CLUSTER_NAME}
+   users:
+   - name: ${CLUSTER_NAME}-sa
+     user:
+       token: ${TOKEN}
+   EOF
+   ```
+
+   For the next step, the cluster certificate (CA) is needed. How you get hold of the certificate depends on the cluster. For GKE you can view it on the GCP Console: Cluster->Details->Endpoint->"Show cluster certificate". You will need to copy the contents of the certificate into the `ca.crt` file used below.
+
+   ```sh
+   CLUSTER_NAME=demo-01 \
+   CA_CERTIFICATE=ca.crt \
+   ENDPOINT=<ENDPOINT> \
+   TOKEN=<TOKEN> ./static-kubeconfig.sh > demo-01-kubeconfig
+   ```
+
+   Replace the following:
+
+   - CLUSTER_NAME: the name of your cluster i.e. `demo-01`
+   - ENDPOINT: the API server endpoint i.e. `34.218.72.31`
+   - CA_CERTIFICATE: path to the CA certificate file of the cluster
+   - TOKEN: the token of the service account retrieved in the previous step
+
+   Finally create a secret for the generated kubeconfig:
+
+   ```sh
+   kubectl create secret generic demo-01-kubeconfig \
+   --from-file=value=./demo-01-kubeconfig
+   ```
+
+
+
+### Connect a cluster
+
+:::tip Before you start!
+
+Make sure you've
+
+1. Added some common RBAC rules into the `clusters/bases` folder, as described in [Getting started](./getting-started.mdx).
+2. Configured the cluster bootstrap controller as described in [Getting started](./getting-started.mdx).
+
+:::
+
+Create a `GitopsCluster`:
+
+```yaml title="./clusters/management/clusters/demo-01.yaml"
+apiVersion: gitops.weave.works/v1alpha1
+kind: GitopsCluster
+metadata:
+  name: demo-01
+  namespace: default
+  # Signals that this cluster should be bootstrapped.
+  labels:
+    weave.works/capi: bootstrap
+spec:
+  secretRef:
+    name: demo-01-kubeconfig
+```
+
+When the `GitopsCluster` appears in the cluster, the Cluster Bootstrap Controller will install Flux on it and by default start reconciling the `./clusters/demo-01` path in your management cluster's git repository. To inspect the Applications and Sources running on the new cluster we need to give permissions to the user accessing the UI. Common RBAC rules like this should be stored in `./clusters/bases`. Here we create a Kustomization to add these common resources onto our new cluster:
+
+```yaml title="./clusters/demo-01/clusters-bases-kustomization.yaml"
+apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+  creationTimestamp: null
+  name: clusters-bases-kustomization
+  namespace: flux-system
+spec:
+  interval: 10m0s
+  path: clusters/bases
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: flux-system
+```
+
+Save these 2 files into your git repository. Commit and push.
+
+Once Flux has reconciled the cluster you can inspect your Flux resources via the UI!
+
+## Debugging
+
+### How to test a kubeconfig secret in a cluster
+
+To test that a kubeconfig secret has been correctly set up, apply the following manifest and check the logs after the job completes:
+
Expand to see manifest + +```yaml +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kubectl +spec: + ttlSecondsAfterFinished: 30 + template: + spec: + containers: + - name: kubectl + image: bitnami/kubectl + args: + [ + "get", + "pods", + "-n", + "kube-system", + "--kubeconfig", + "/etc/kubeconfig/value", + ] + volumeMounts: + - name: kubeconfig + mountPath: "/etc/kubeconfig" + readOnly: true + restartPolicy: Never + volumes: + - name: kubeconfig + secret: + secretName: demo-01-kubeconfig + optional: false +``` + +
+ +In the manifest above `demo-01-kubeconfig` is the name of the secret that contains the kubeconfig for the remote cluster. + +--- + +# Background + +- [Authentication strategies](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#authentication-strategies) + - [X509 client certificates](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#x509-client-certs): can be used across different namespaces + - [Service account tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens): limited to a single namespace +- [Kubernetes authentication 101 (CNCF blog post)](https://www.cncf.io/blog/2020/07/31/kubernetes-rbac-101-authentication/) +- [Kubernetes authentication (Magalix blog post)](https://www.magalix.com/blog/kubernetes-authentication) diff --git a/website/versioned_docs/version-0.24.0/cluster-management/profiles.mdx b/website/versioned_docs/version-0.24.0/cluster-management/profiles.mdx new file mode 100644 index 0000000000..1042d13d84 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/cluster-management/profiles.mdx @@ -0,0 +1,149 @@ +--- +title: Profiles +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Profiles + +:::note BEFORE YOU START +The following instructions require you to make minor changes to the content of your own hosted Helm repository. +::: + +When provisioning new clusters it is often useful to install selected software packages to them as part of their bootstrap process. Weave GitOps Enterprise enables this by installing standard Helm charts to the newly provisioned clusters. This feature lowers the ongoing operational overhead and allows for the clusters to be immediately usable after being provisioned. To set this up you need to: + +1. Annotate a Helm chart to make it available for installation +2. Select which profiles you want installed when creating a cluster + +### 1. Annotate a Helm chart to make it available for installation + +In order for a chart to become available for installation, it needs to include a `weave.works/profile` annotation. For example: + +```yaml title="Chart.yaml" +annotations: + weave.works/profile: observability-profile +apiVersion: v1 +appVersion: 1.0.0 +description: Observability Helm chart for Kubernetes +home: https://github.com/weaveworks/observability-profile +kubeVersion: ">=1.19.0-0" +name: observability +sources: + - https://github.com/weaveworks/observability-profile +version: 1.0.0 +``` + +The annotation value is not important and can be left blank i.e. `""`. Helm charts with the `weave.works/profile` annotation are called _Profiles_. + +Annotations can also be used to determine the order in which profiles will be installed. + +``` +annotations: + weave.works/profile: observability-profile + weave.works/layer: layer-0 +``` + +``` +annotations: + weave.works/profile: podinfo-profile + weave.works/layer: layer-1 +``` + +The profiles will be sorted lexicographically by their layer and those at a higher layer will only be installed after lower layers have been successfully installed and started. + +In this example, `observability-profile` will be installed prior to `podinfo-profile`. In the corresponding HelmReleases, the dependencies can be observed under the `dependsOn` field. + +``` +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + labels: + weave.works/applied-layer: layer-0 + name: cluster-name-observability + namespace: wego-system +... 
+
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+  labels:
+    weave.works/applied-layer: layer-1
+  name: cluster-name-podinfo
+  namespace: wego-system
+spec:
+...
+  dependsOn:
+  - name: cluster-name-observability
+...
+```
+
+#### (Optional) Using a Helm chart from a remote public/private repository
+The Helm releases with the profiles can be added to a remote repository which can be referenced using a HelmRepository resource. The repository can be either public or private, although extra steps are required to use a private repo.
+
+In this example, a public repo and branch are referenced directly, where the Helm releases are located:
+```yaml title="HelmRepository.yaml"
+apiVersion: source.toolkit.fluxcd.io/v1beta1
+kind: HelmRepository
+metadata:
+  name: weaveworks-charts
+  namespace: flux-system
+spec:
+  interval: 1m
+  url: https://weaveworks.github.io/weave-gitops-profile-examples/
+```
+
+To be able to use private repositories with restricted access, a secret can be used and synced to the target leaf cluster using [SecretSync](../secrets/bootstraping-secrets.mdx).
+
+The secret is referenced in the SecretSync as `spec.secretRef`, and the labels of the target leaf cluster are added so that the syncer can match clusters against those labels using `spec.clusterSelector.matchLabels`.
+```yaml title="SecretSync.yaml"
+apiVersion: capi.weave.works/v1alpha1
+kind: SecretSync
+metadata:
+  name: my-dev-secret-syncer
+  namespace: flux-system
+spec:
+  clusterSelector:
+    matchLabels:
+      weave.works/capi: bootstrap
+  secretRef:
+    name: weave-gitops-enterprise-credentials
+  targetNamespace: flux-system
+```
+
+Once the SecretSync and Secret are available, the secret can be referenced directly in the HelmRepository object:
+```yaml title="PrivateHelmRepository.yaml"
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+  name: weaveworks-charts
+  namespace: flux-system
+spec:
+  interval: 60m
+  secretRef:
+    name: weave-gitops-enterprise-credentials
+  url: https://charts.dev.wkp.weave.works/releases/charts-v3
+```
+**Note**: The `HelmRepoSecret`, `SecretSync`, and the `GitopsCluster` should all be in the same namespace.
+
+### 2. Select which profiles you want installed when creating a cluster
+
+Currently, WGE inspects the namespace that it is deployed in (in the management cluster) for a `HelmRepository` object named `weaveworks-charts`. This Kubernetes object should be pointing to a Helm chart repository that includes the profiles that are available for installation.
+
+When creating a cluster from the UI using a CAPI template, these profiles should be available for selection in the `Profiles` section of the template. For example:
+
+![Profiles Selection](./img/profile-selection.png)
+
+As shown above, some profiles will be optional whereas some profiles will be required. This is determined when the template is authored and allows operations teams to control which Helm packages should be installed on new clusters by default.
+
+To allow editing of the YAML values for required profiles, the `editable` flag can be added in the annotation describing the required profile in the template.
For example:
+
+```
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+  name: connect-a-cluster-with-policies
+  namespace: default
+  annotations:
+    capi.weave.works/profile-0: '{"name": "weave-policy-agent", "editable": true, "version": "0.2.8", "values": "accountId: weaveworks\nclusterId: ${CLUSTER_NAME}" }'
+```
diff --git a/website/versioned_docs/version-0.24.0/cluster-management/provider-identities.mdx b/website/versioned_docs/version-0.24.0/cluster-management/provider-identities.mdx
new file mode 100644
index 0000000000..9a3563a351
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/cluster-management/provider-identities.mdx
@@ -0,0 +1,82 @@
+---
+title: CAPI Provider Identities
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# CAPI Provider Identities
+
+## Multi-tenancy
+
+Some Cluster API providers allow you to choose the account or identity that the new cluster will be created with. This is often referred to as _Multi-tenancy_ in the CAPI world. Weave GitOps currently supports:
+
+- [**AWS** multi-tenancy](https://cluster-api-aws.sigs.k8s.io/topics/multitenancy.html)
+- [**Azure** multi-tenancy](https://capz.sigs.k8s.io/topics/multitenancy.html)
+- [**vSphere** multi-tenancy](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/blob/master/docs/identity_management.md)
+
+### Identities and templates
+
+Our _templates_ describe the properties of the cluster (how many nodes, what version of Kubernetes, etc.), while the _identity_ determines which account will be used to create the cluster. So, given that in our cluster we have the template:
+
+```yaml
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+  name: capa-cluster-template
+spec:
+  resourcetemplates:
+    - contents:
+        - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
+          kind: AWSCluster
+          metadata:
+            name: "${CLUSTER_NAME}"
+          spec:
+            region: "${AWS_REGION}"
+```
+
+and the identity
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AWSClusterStaticIdentity
+metadata:
+  name: "test-account"
+spec:
+  secretRef:
+    name: test-account-creds
+    namespace: capa-system
+  allowedNamespaces:
+    selector:
+      matchLabels:
+        cluster.x-k8s.io/ns: "testlabel"
+```
+
+We can ask Weave GitOps to use the `test-account` when creating the cluster by using the _Infrastructure provider credentials_ dropdown on the _Create new cluster with template_ page:
+
+![Identity Selection](./img/identity-selection.png)
+
+The resulting definition will have the identity injected into the appropriate place in the template, for this example:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
+kind: AWSCluster
+metadata:
+  name: example-cluster
+spec:
+  region: eu-north-1
+  identityRef:
+    kind: AWSClusterStaticIdentity
+    name: test-account
+```
+
+### `identityRef`s
+
+The supported providers implement multi-tenancy by setting an `identityRef` on the provider cluster object, e.g. `AWSCluster`, `AzureCluster` or `VSphereCluster`.
+
+Weave GitOps will search _all namespaces_ in the cluster for potential identities that can be used to create a cluster.
The following identity `kind`s are currently supported, along with their corresponding cluster kinds:
+
+- `AWSClusterStaticIdentity`: `AWSCluster`
+- `AWSClusterRoleIdentity`: `AWSCluster`
+- `AzureClusterIdentity`: `AzureCluster`
+- `VSphereClusterIdentity`: `VSphereCluster`
diff --git a/website/versioned_docs/version-0.24.0/configuration/emergency-user.mdx b/website/versioned_docs/version-0.24.0/configuration/emergency-user.mdx
new file mode 100644
index 0000000000..c5b1548aa2
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/configuration/emergency-user.mdx
@@ -0,0 +1,106 @@
+---
+title: Emergency Cluster User Account
+---
+
+:::danger Important
+This is an **insecure** method of securing your dashboard, which we only recommend for local
+and development environments, or if you need to activate emergency access to a damaged cluster.
+
+Note also that this mechanism only exists for a single user: you will not be able to
+create multiple users. Weave GitOps does not provide its own authentication mechanism;
+for secure and fully-featured authentication we **strongly recommend** using an OIDC provider as described [here](../oidc-access).
+:::
+
+## Configuring the emergency user
+
+Before you log in via the emergency user account, you need to generate a bcrypt hash for your chosen password and store it as a secret in Kubernetes.
+There are several different ways to generate a bcrypt hash; this guide uses `gitops get bcrypt-hash` from our CLI.
+
+Generate the password by running:
+
+```sh
+PASSWORD="<your password>"
+echo -n $PASSWORD | gitops get bcrypt-hash
+$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q
+```
+
+Now create a Kubernetes secret to store your chosen username and the password hash:
+
+```sh
+kubectl create secret generic cluster-user-auth \
+  --namespace flux-system \
+  --from-literal=username=admin \
+  --from-literal=password='$2a$10$OS5NJmPNEb13UTOSKngMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q'
+```
+
+You should now be able to log in via the cluster user account using your chosen username and password.
+
+## Updating the emergency user
+
+To change either the username or the password, recreate the `cluster-user-auth` secret
+with the new details.
+
+Note that only one emergency user can be created this way. To add more users,
+enable an [OIDC provider](../oidc-access).
+
+## User permissions
+
+By default, both a ClusterRole and a Role are generated for the emergency user.
+Both have the same permissions, with the former being optional and the latter being
+bound to the `flux-system` namespace (where Flux stores its resources by default).
+
+The default set of rules is configured like this:
+
+```yaml
+rules:
+  # Flux Resources
+  - apiGroups: ["source.toolkit.fluxcd.io"]
+    resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+    verbs: [ "get", "list", "watch", "patch" ]
+
+  - apiGroups: ["kustomize.toolkit.fluxcd.io"]
+    resources: [ "kustomizations" ]
+    verbs: [ "get", "list", "watch", "patch" ]
+
+  - apiGroups: ["helm.toolkit.fluxcd.io"]
+    resources: [ "helmreleases" ]
+    verbs: [ "get", "list", "watch", "patch" ]
+
+  - apiGroups: [ "notification.toolkit.fluxcd.io" ]
+    resources: [ "providers", "alerts" ]
+    verbs: [ "get", "list", "watch", "patch" ]
+
+  - apiGroups: ["infra.contrib.fluxcd.io"]
+    resources: ["terraforms"]
+    verbs: [ "get", "list", "watch", "patch" ]
+
+  # Read access for all other Kubernetes objects
+  - apiGroups: ["*"]
+    resources: ["*"]
+    verbs: [ "get", "list", "watch" ]
+```
+
+These permissions give the emergency user administrator-level powers. **We do not
+advise leaving it active on production systems**.
+
+If required, the permissions can be expanded with the `rbac.additionalRules` field in the
+[Helm Chart](../references/helm-reference.md).
+Follow the instructions in the next section in order to configure RBAC correctly.
+
+:::tip
+To remove the emergency user as a login method, set the following values in the
+[Helm Chart](../references/helm-reference.md):
+
+```yaml
+#
+adminUser:
+  create: false
+#
+additionalArgs:
+- --auth-methods=oidc
+#
+```
+
+If you are disabling an already existing emergency user, you will need to
+manually delete the Kubernetes Secret and any User Roles which were created on
+the cluster.
+:::
diff --git a/website/versioned_docs/version-0.24.0/configuration/oidc-access.mdx b/website/versioned_docs/version-0.24.0/configuration/oidc-access.mdx
new file mode 100644
index 0000000000..c877770616
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/configuration/oidc-access.mdx
@@ -0,0 +1,108 @@
+---
+title: OIDC Provider
+---
+
+# Login via an OIDC provider
+
+You may decide to give your engineering teams access to the dashboard, in order to view and manage their workloads. In this case, you will want to secure access to the dashboard and restrict who can interact with it. Weave GitOps integrates with your OIDC provider and uses standard Kubernetes RBAC to give you fine-grained control of the permissions for the dashboard users.
+
+## Background
+
+OIDC extends the OAuth2 authorization protocol by including an additional field (ID Token) that contains information (claims) about a user's identity. After a user successfully authenticates with the OIDC provider, this information is used by Weave GitOps to impersonate the user in any calls to the Kubernetes API. This allows cluster administrators to use RBAC rules to control access to the cluster and also the dashboard.
+
+## Configuration
+
+In order to log in via your OIDC provider, you need to create a Kubernetes secret to store the OIDC configuration.
This configuration consists of the following parameters: + +| Parameter | Description | Default | +| ------------------| -------------------------------------------------------------------------------------------------------------------------------- | --------- | +| `issuerURL` | The URL of the issuer, typically the discovery URL without a path | | +| `clientID` | The client ID that has been setup for Weave GitOps in the issuer | | +| `clientSecret` | The client secret that has been setup for Weave GitOps in the issuer | | +| `redirectURL` | The redirect URL that has been setup for Weave GitOps in the issuer, typically the dashboard URL followed by `/oauth2/callback ` | | +| `tokenDuration` | The time duration that the ID Token will remain valid, after successful authentication | "1h0m0s" | + +Ensure that your OIDC provider has been setup with a client ID/secret and the redirect URL of the dashboard. + +Create a secret named `oidc-auth` in the `flux-system` namespace with these parameters set: + +```sh +kubectl create secret generic oidc-auth \ + --namespace flux-system \ + --from-literal=issuerURL= \ + --from-literal=clientID= \ + --from-literal=clientSecret= \ + --from-literal=redirectURL= \ + --from-literal=tokenDuration= +``` + +Once the HTTP server starts, unauthenticated users will have to click 'Login With OIDC Provider' to log in or use the cluster account (if configured). Upon successful authentication, the users' identity will be impersonated in any calls made to the Kubernetes API, as part of any action they take in the dashboard. By default the Helm chart will configure RBAC correctly but it is recommended to read the [service account](service-account-permissions.mdx) and [user](user-permissions.mdx) permissions pages to understand which actions are needed for Weave GitOps to function correctly. + +## Customizing + +For some OIDC configurations, you may need to customise the requested [scopes](https://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims) or [claims](https://openid.net/specs/openid-connect-core-1_0.html#Claims). + +### Scopes + +By default, the following scopes are requested "openid","offline_access","email","groups". + +The "openid" scope is **mandatory** for OpenID auth, and the "email", and "groups" scopes are commonly used as unique identifiers in organisations. + +We use "offline_access" to allow us to refresh OIDC tokens to keep login sessions alive for as long as a refresh token is valid. + +You can however change the defaults. +```sh +kubectl create secret generic oidc-auth \ + --namespace flux-system \ + --from-literal=issuerURL= \ + --from-literal=clientID= \ + --from-literal=clientSecret= \ + --from-literal=redirectURL= \ + --from-literal=tokenDuration= \ + --from-literal=customScopes=custom,scopes +``` +The format for the `customScopes` key is a comma-separated list of scopes to request, in this case, "custom" and "scopes" would be requested, in addition to "openid". + +### Claims + +By default, the following claims are parsed from the OpenID [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken) "email" and "groups", these are presented as the `user` and `groups` when we communicate with your Kubernetes API server. + +This is equivalent to [configuring](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server) your `kube-apiserver` with `--oidc-username-claim=email --oidc-groups-claim=groups`. + +Again, you can configure these from the `oidc-auth` `Secret`. 
+ +```sh +kubectl create secret generic oidc-auth \ + --namespace flux-system \ + --from-literal=issuerURL= \ + --from-literal=clientID= \ + --from-literal=clientSecret= \ + --from-literal=redirectURL= \ + --from-literal=tokenDuration= \ + --from-literal=claimUsername=sub \ + --from-literal=claimGroups=groups +``` +There are two separate configuration keys, you can override them separately, these should match your `kube-apiserver` configuration. + +### Login UI + +The label of the OIDC button on the login screen is configurable via a feature flag environment variable. +This can give your users a more familiar experience when logging in. + +Adjust the configuration in the helm `values.yaml` file or the `spec.values` section of the Weave Gitops `HelmRelease` resource: + +#### Weave Gitops + +```yaml +envVars: + - name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL + value: "Login with ACME" +``` + +#### Weave Gitops Enterprise + +```yaml +extraEnvVars: + - name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL + value: "Login with ACME" +``` \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/configuration/recommended-rbac-configuration.mdx b/website/versioned_docs/version-0.24.0/configuration/recommended-rbac-configuration.mdx new file mode 100644 index 0000000000..03721cfdf3 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/configuration/recommended-rbac-configuration.mdx @@ -0,0 +1,178 @@ +--- +title: Recommended RBAC Configuration +--- + +This page summarises the contents of the [securing access to the dashboard](securing-access-to-the-dashboard.mdx), +[service account permissions](service-account-permissions.mdx) and [user permissions](user-permissions.mdx). They should be +read in addition to this page in order to understand the suggestions made here and +their ramifications. + +This page is purposefully vague as the intention is to give a broad idea of how +such a system could be implemented, not the specifics as that will be dependent +on your specific circumstances and goals. + +## Summary + +The general recommendation is to use OIDC and a small number of groups that +Weave GitOps can impersonate. + +OIDC is the recommended method for managing authentication as it decouples the +need to manage user lists from the application, allowing it to be managed via +a central system designed for that purpose (i.e. the OIDC provider). OIDC also +enables the creation of groups (either via your provider's own systems or by +using a connector like [Dex](../guides/setting-up-dex.md)). + +Configuring Weave GitOps to impersonate kubernetes groups rather than +users has the following benefits: +* A user's permissions for impersonation by Weave GitOps can be separate from + any other permissions that they may or may not have within the cluster. +* Users do not have to be individually managed within the cluster and can have + their permissions managed together. 
+ +## Example set up + +Assume that your company has the following people in OIDC +* Aisha, a cluster admin, who should have full admin access to Weave GitOps +* Brian, lead of team-A, who should have admin permissions to their team's + namespace in Weave GitOps and readonly otherwise +* June and Jo, developers in team-A who should have read-only access to Weave GitOps + +You could then create 3 groups: + +* `wego-admin` + - Bound to the `ClusterRole`, created by Helm, `wego-admin-cluster-role` + - Aisha is the only member +* `wego-team-a-admin` + - Bound to a `Role`, using the same permissions as `wego-admin-role`, created + in Team's namespace + - Brian and Aisha are members +* `wego-readonly` + - Bound to a `ClusterRole` that matches `wego-admin-cluster-role` but with + no `patch` permissions. + - Aisha, Brian, June & Jo are all members + +The Weave GitOps service account can then be configured with: +```yaml +rbac: + impersonationResourceNames: ["wego-admin", "wego-team-a-admin", "wego-readonly"] + impersonationResources: ["groups"] +``` +so that only these three groups can be `impersonated` by the service account. + +:::caution Using OIDC for cluster and Weave GitOps Authentication +If the same OIDC provider is used to authenticate a user with the cluster +itself (e.g. for use with `kubectl`) and to Weave GitOps then, depending +on OIDC configuration, they may end up with the super-set of their permissions +from Weave GitOps and any other permissions granted to them. + +This can lead to un-intended consequences (e.g. viewing `secrets`). To avoid +this OIDC providers will often let you configure which groups are returned +to which clients: the Weave GitOps groups should not be returned to the +cluster client (and vice versa). +::: + +### Code + +The yaml to configure these permissions would look roughly like: + +
Expand to see example RBAC + +```yaml +# Admin cluster role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: wego-admin-cluster-role +rules: + - apiGroups: [""] + resources: ["secrets", "pods" ] + verbs: [ "get", "list" ] + - apiGroups: ["apps"] + resources: [ "deployments", "replicasets"] + verbs: [ "get", "list" ] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] +--- +# Read only cluster role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: wego-readonly-role +rules: + # All the 'patch' permissions have been removed + - apiGroups: [""] + resources: ["secrets", "pods" ] + verbs: [ "get", "list" ] + - apiGroups: ["apps"] + resources: [ "deployments", "replicasets"] + verbs: [ "get", "list" ] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list" ] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list" ] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list" ] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] +--- +# Bind the cluster admin role to the wego-admin group +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wego-cluster-admin +subjects: +- kind: Group + name: wego-admin # only Aisha is a member + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: wego-admin-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +# Bind the admin role in the team-a namespace for the wego-team-a-admin group +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: wego-team-a-admin-role + namespace: team-a +subjects: +- kind: Group + name: wego-team-a-admin # Aisha & Brian are members + apiGroup: rbac.authorization.k8s.io +roleRef: + # Use the cluster role to set rules, just bind them in the team-a namespace + kind: ClusterRole + name: wego-admin-role + apiGroup: rbac.authorization.k8s.io +--- +# Bind the readonly role to the readonly group +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wego-readonly-role +subjects: +- kind: Group + name: wego-readonly # Everyone is a member + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: wego-readonly-role + apiGroup: rbac.authorization.k8s.io +--- +``` + +
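Once these bindings are in place, a quick way to sanity-check them is to ask the API server what each group can do, using the same impersonation mechanism the dashboard relies on. A short example, reusing the illustrative group names from above (the user emails are placeholders):

```bash
# Expect "yes": the wego-admin group can patch Kustomizations cluster-wide
kubectl auth can-i patch kustomizations.kustomize.toolkit.fluxcd.io \
  --as=aisha@example.com --as-group=wego-admin --all-namespaces

# Expect "no": the read-only group has no patch permissions
kubectl auth can-i patch kustomizations.kustomize.toolkit.fluxcd.io \
  --as=jo@example.com --as-group=wego-readonly --all-namespaces
```

Note that running these checks requires that your own credentials are allowed to impersonate the users and groups in question.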
diff --git a/website/versioned_docs/version-0.24.0/configuration/securing-access-to-the-dashboard.mdx b/website/versioned_docs/version-0.24.0/configuration/securing-access-to-the-dashboard.mdx new file mode 100644 index 0000000000..b0f40ae381 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/configuration/securing-access-to-the-dashboard.mdx @@ -0,0 +1,15 @@ +--- +title: Securing access to the dashboard +--- + +## Dashboard Login + +There are 2 supported methods for logging in to the dashboard: +- [Login via an OIDC provider](../oidc-access) +- [Login via a cluster user account](../emergency-user) (not recommended) + +The recommended method is to integrate with an OIDC provider, +as this will let you control permissions for existing users and groups that have +already been configured to use OIDC. However, it is also possible to use the Emergency Cluster +User Account to login, if an OIDC provider is not available to use. +Both methods work with standard Kubernetes RBAC. diff --git a/website/versioned_docs/version-0.24.0/configuration/service-account-permissions.mdx b/website/versioned_docs/version-0.24.0/configuration/service-account-permissions.mdx new file mode 100644 index 0000000000..ef81eb1253 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/configuration/service-account-permissions.mdx @@ -0,0 +1,124 @@ +--- +title: Dashboard Runtime Permissions +--- + +# GitOps Dashboard Service Account Permissions + +:::danger Important +This doc covers the service account [permissions](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +for the **Weave Gitops application** itself (ie. the permissions the Dashboard needs to work). +For the service account for the **cluster user** role (ie. for the user accessing the +GitOps Dashboard), see the page [here](user-permissions.mdx). 
+::: + +The default permissions of the service account are defined in the [helm chart](https://github.com/weaveworks/weave-gitops/tree/main/charts/gitops-server/templates/role.yaml) which +will generate a cluster role with the following permissions: + +```yaml +rules: +# Used to query the cluster +- apiGroups: [""] + resources: ["users", "groups"] # set by rbac.impersonationResources + verbs: [ "impersonate" ] + # resourceNames: [] # set by rbac.impersonationResourceNames +# Used to get OIDC/static user credentials for login +- apiGroups: [""] + resources: [ "secrets" ] + verbs: [ "get", "list" ] + resourceNames: # set by rbac.viewSecretsResourceNames + - "cluster-user-auth" + - "oidc-auth" +# The service account needs to read namespaces to know where it can query +- apiGroups: [ "" ] + resources: [ "namespaces" ] + verbs: [ "get", "list" ] +``` + +These allow the pod to do three things: +* [Impersonate](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation) the user and operate in the cluster as them +* Read the available namespaces (this is required to understand the users' permissions) +* Read the `cluster-user-auth` and `oidc-auth` secrets, which are the default secrets + to store the emergency cluster user account and OIDC configuration (see + [securing access to the dashboard](securing-access-to-the-dashboard.mdx)) + +## The Helm values + +| Value | Description | Default | +|-----------------------------------|---------------------------------------------------------------------|--------------------------------------| +| `rbac.impersonationResources` | Which resource types the service account can impersonate | `["users", "groups"]` | +| `rbac.impersonationResourceNames` | Specific users, groups or services account that can be impersonated | `[]` | +| `rbac.viewSecretsResourceNames` | Specific secrets that can be read | `["cluster-user-auth", "oidc-auth"]` | + + +## Impersonation + +The primary way Weave GitOps queries the Kube API is via `impersonation`, the +application (not the cluster) authenticates the user (either via the [emergency +cluster user](../emergency-user) credentials or OIDC) then makes calls to the Kube API on the user's +behalf. This is equivalent to making a kubectl call like: + +```bash +$ kubectl get deployments --as aisha@example.com +``` + +Assuming the user `aisha@example.com` has been granted permissions to get +deployments within the cluster then this will return them. The same occurs +within the application. This makes the proper configuration of the application's +permissions very important as, without proper restrictions it can impersonate +very powerful `users` or `groups`. For example, the `system:masters` is group +is generally bound to the `cluster-admin` role which can do anything. + +For more details of the permissions needed by the user or group see the +[user permissions](user-permissions.mdx) guide. + +### Configuring impersonation + +It is highly recommended that you limit which users and groups that the +application can impersonate by setting `rbac.impersonationResourceNames` in +the Helm chart's `values`. e.g.: + +```yaml +rbac: + impersonationResources: ["groups"] + impersonationResourceNames: + - admin + - dev-team + - qa-team +``` +In this example the application can only impersonate the groups admin, dev-team +and qa-team (this also, implicitly disables the [emergency cluster user](../emergency-user)). 
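With values like the above, the impersonation rule that the chart generates is scoped to just those names. Conceptually (the exact output depends on the chart version), the resulting rule looks something like:

```yaml
rules:
  - apiGroups: [""]
    resources: ["groups"]                              # rbac.impersonationResources
    verbs: ["impersonate"]
    resourceNames: ["admin", "dev-team", "qa-team"]    # rbac.impersonationResourceNames
```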
+ +Unfortunately not all OIDC providers support groups so you may need to +manually enumerate users, for example: +```yaml +rbac: + impersonationResources: ["users"] + impersonationResourceNames: + - aisha@example.com + - bill@example.com + - wego-admin # enable the emergency cluster user +``` + +A better, albeit more involved, solution is to set up an OIDC connector like +[Dex](../guides/setting-up-dex.md) and use that to manage groups for you. + +## Get namespaces + +The application itself uses get namespace permissions to pre-cache the list of +available namespaces. As the user accesses resources their permissions within +various namespaces is also cached to speed up future operations. + +## Reading the `cluster-user-auth` and `oidc-auth secrets` + +The `cluster-user-auth` and `oidc-auth` secrets provide information for authenticating +to the application. The former holds the username and bcrypt-hashed password +for the [emergency user](../emergency-user) and the latter holds OIDC configuration. + +The application needs to be able to access these secrets in order to +authenticate users. + +### Configuring secrets + +The `rbac.viewSecretsResourceNames` value allows the operator to change which secrets the +application can read. This is mostly so that, if the emergency user is not +configured, that secret can be removed, or if the secret _is_ in use but renamed. diff --git a/website/versioned_docs/version-0.24.0/configuration/tls.md b/website/versioned_docs/version-0.24.0/configuration/tls.md new file mode 100644 index 0000000000..b9932d6ff6 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/configuration/tls.md @@ -0,0 +1,49 @@ +--- +title: TLS and Certificates +--- + +## TLS configuration + +By default the dashboard will listen on 0.0.0.0:9001 with TLS disabled and +without exposing any external connection. + +Exposing services without TLS if not recommended. Without a certificate, a user +can't be sure they are using the right service, and the traffic will be easily +monitored, or even tampered with. All communication between the user and an endpoint +with TLS will be encrypted. + +To expose an external connection, you must first configure TLS. TLS termination +can be provided via an ingress controller or directly by the dashboard. In +either case, the Helm Release must be updated. To have the dashboard itself +handle TLS, you must create a `tls` secret containing the cert and key: + +```cli +kubectl create secret tls my-tls-secret \ + --cert=path/to/cert/file \ + --key=path/to/key/file +``` + +and reference it from the helm release: + +```yaml + values: + serverTLS: + enabled: true + secretName: "my-tls-secret" +``` + +If you prefer to delegate TLS handling to the ingress controller instead, your +helm release should look like: + +```yaml + values: + ingress: + enabled: true + ... other parameters specific to the ingress type ... +``` + +## cert-manager + +Install [cert-manager](../guides/cert-manager.md) and request a `Certificate` in +the `flux-system` namespace. Provide the name of secret associated with the +certificate to the weave-gitops-enterprise HelmRelease as described above. 
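For example, a `Certificate` for the dashboard might look like the sketch below. The issuer, DNS name and secret name are placeholders to replace with your own values; the secret name is what you then reference from `serverTLS.secretName` (or from your ingress TLS configuration):

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: weave-gitops-tls
  namespace: flux-system
spec:
  secretName: weave-gitops-tls        # reference this from serverTLS.secretName
  dnsNames:
    - weave-gitops.example.com        # placeholder hostname for the dashboard
  issuerRef:
    name: my-cluster-issuer           # placeholder; an existing Issuer or ClusterIssuer
    kind: ClusterIssuer
```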
diff --git a/website/versioned_docs/version-0.24.0/configuration/user-permissions.mdx b/website/versioned_docs/version-0.24.0/configuration/user-permissions.mdx new file mode 100644 index 0000000000..2ec436f633 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/configuration/user-permissions.mdx @@ -0,0 +1,244 @@ +--- +title: User Permissions +--- +import TierLabel from "../_components/TierLabel"; + +This is an explanation of the [kubernetes permissions](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +needed by users/groups of the Weave GitOps application. As covered in +[service account permissions](service-account-permissions.mdx) +the primary way that the application interacts with the Kube API is via [impersonation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation). +This means that the permissions granted to the users and groups that Weave GitOps +can impersonate determine the scope of actions that it can take within your cluster. + +At a minimum, a User should be bound to Role in the `flux-system` namespace (where +flux stores its resources by default) with the following permissions: + +```yaml +rules: + # Flux Resources + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: [ "notification.toolkit.fluxcd.io" ] + resources: [ "providers", "alerts" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: ["infra.contrib.fluxcd.io"] + resources: ["terraforms"] + verbs: [ "get", "list", "watch", "patch" ] + + # Read access for all other Kubernetes objects + - apiGroups: ["*"] + resources: ["*"] + verbs: [ "get", "list", "watch" ] +``` + +For a wider scope the User can be bound to a ClusterRole with the same set. + +### Flux Resources + +The resources that Flux works with directly, including the one from TF-controller. + +| Api Group | Resources | Permissions | +| ------------------------------ | ----------------------------------------------------------------------- | ---------------- | +| kustomize.toolkit.fluxcd.io | kustomizations | get, list, patch | +| helm.toolkit.fluxcd.io | helmreleases | get, list, patch | +| source.toolkit.fluxcd.io | buckets, helmcharts, gitrepositories, helmrepositories, ocirepositories | get, list, patch | +| notification.toolkit.fluxcd.io | providers, alerts | get, list | +| infra.contrib.fluxcd.io | terraforms | get, list, patch | + +In order for Weave GitOps to be able to accurately display the state of Flux it +needs to be able to query the [CRDs](https://fluxcd.io/docs/components/) that Flux uses. This is done using the +`get` and `list` permissions + +The `patch` permissions are used for 2 features: to suspend and resume +reconciliation of a resource by modifying the 'spec' of a resource, +and to force reconciliation of a resource by modifying the annotations +of the resource. These features work the same way `flux suspend`, +`flux resume` and `flux reconcile` does on the CLI. 
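To make the effect of the `patch` verb concrete, the dashboard's suspend/resume and sync actions are roughly equivalent to patches like the following, shown here with `kubectl` against a hypothetical `podinfo` Kustomization:

```bash
# Suspend reconciliation by setting spec.suspend (what suspend/resume toggles)
kubectl patch kustomizations.kustomize.toolkit.fluxcd.io podinfo -n flux-system \
  --type=merge -p '{"spec":{"suspend":true}}'

# Request an immediate reconciliation by updating an annotation (what sync does)
kubectl annotate kustomizations.kustomize.toolkit.fluxcd.io podinfo -n flux-system \
  --overwrite reconcile.fluxcd.io/requestedAt="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
```

Without `patch` on the relevant Flux kinds, these actions in the UI will fail for that user.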
+ +### Resources managed via Flux + +| Api Group | Resources | Permissions | +|---------------------------|--------------------------------------------------------------------------------|------------------| +| "" | configmaps, secrets, pods, services, persistentvolumes, persistentvolumeclaims | get, list, watch | +| apps | deployments, replicasets, statefulsets | get, list, watch | +| batch | jobs, cronjobs | get, list, watch | +| autoscaling | horizontalpodautoscalers | get, list, watch | +| rbac.authorization.k8s.io | roles, clusterroles, rolebindings, clusterrolebindings | get, list, watch | +| networking.k8s.io | ingresses | get, list, watch | + +Weave GitOps reads basic resources so that it can monitor the effect that Flux has +on what's running. + +Reading `secrets` enables Weave GitOps to monitor the state of Helm releases +as that's where it stores the [state by default](https://helm.sh/docs/faq/changes_since_helm2/#secrets-as-the-default-storage-driver). +For clarity this these are the Helm release objects _not_ the Flux HelmRelease +resource (which are dealt with by the earlier section). + +### Feedback from Flux + +The primary method by which Flux communicates the status of itself is by events, +these will show when reconciliations start and stop, whether they're successful +and information as to why they're not. + +## Weave GitOps Enterprise + +Weave GitOps Enterprise extends Weave GitOps OSS by adding more roles. These roles may need to be extended further in order to support certain use cases. Some of the most common use cases are described below. + +### Progressive delivery with Flagger + +Weave GitOps Enterprise integrates with Flagger in order to provide a view on progressive delivery deployments. This includes the ability to view all the resources that Flagger manages during its operation. The default ClusterRole `gitops-canaries-reader` includes the minimum permissions necessary for a user to be able to view canary object details, metric template object details and canary related events. + +When Flagger is configured to integrate with a service mesh such as Linkerd or Istio for the rollout, then this ClusterRole needs to be extended so that it can read the additional service mesh resources being generated by Flagger. Note that currently, in order to display service mesh or ingress related resources, we require `spec.provider` to be set in each canary resource. + +The following table provides a list of all the custom resources that Flagger generates grouped by provider: + +| Provider | API Group | Resource | +| --- | --- | --- | +| AppMesh | appmesh.k8s.aws | virtualnode | +| | appmesh.k8s.aws | virtualrouter | +| | appmesh.k8s.aws | virtualservice | +| Linkerd | split.smi-spec.io | trafficsplit | +| Istio | networking.istio.io | destinationrule | +| | networking.istio.io | virtualservice | +| Contour | projectcontour.io | httpproxy | +| Gloo | gateway.solo.io | routetable | +| | gloo.solo.io | upstream | +| Nginx | networking.k8s.io | ingress | +| Skipper | networking.k8s.io | ingress | +| Traefik | traefik.containo.us | traefikservice | +| Open Service Mesh | split.smi-spec.io | trafficsplit | +| Kuma | kuma.io | trafficroute | +| GatewayAPI | gateway.networking.k8s.io | httproute | + +For example, the following manifest shows how `gitops-canaries-reader` has been extended to allow the user for viewing TrafficSplit resources when Linkerd is used: + +
Expand to see example canary reader RBAC + +```yaml title="gitops-canaries-reader.yaml" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gitops-canaries-reader +rules: +- apiGroups: + - flagger.app + resources: + - canaries + - metrictemplates + verbs: + - get + - list +- apiGroups: + - "" + resources: + - events + verbs: + - get + - watch + - list +# Additional permissions for Linkerd resources are added below +- apiGroups: + - split.smi-spec.io + resources: + - trafficsplits + verbs: + - get + - list +``` + +
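As noted above, the canary also needs `spec.provider` set for the mesh-specific resources to be resolved. A minimal sketch of such a canary (names, namespace and analysis settings are illustrative only):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: linkerd          # tells Flagger (and the UI) which mesh resources to expect
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
  analysis:
    interval: 1m
    threshold: 5
    iterations: 10
```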
+ +#### Setting up remote cluster permissions + +In order to view canaries in a remote cluster from the management cluster, you need to consider the following: +- The service account used to access the remote cluster needs to be able to list namespaces and custom resource definitions in the given cluster. It additionally needs to be able to impersonate users and groups. +- The user or group that logs in to the management cluster, needs appropriate permissions to certain resources of the remote cluster. + +For example, applying the following manifest on remote clusters, ensures that the `wego-admin` user will be able to view canary information from within the Weave GitOps Enterprise UI on the management cluster: + +
Expand to see example of remote cluster canary reader + +```yaml title="remote-cluster-service-user-rbac.yaml" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: user-groups-impersonator +rules: + - apiGroups: [""] + resources: ["users", "groups"] + verbs: ["impersonate"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: impersonate-user-groups +subjects: + - kind: ServiceAccount + name: remote-cluster-01 # Service account created in remote cluster + namespace: default +roleRef: + kind: ClusterRole + name: user-groups-impersonator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: canary-reader +rules: + - apiGroups: [""] + resources: [ "events", "services" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "apps" ] + resources: [ "*" ] + verbs: [ "get", "list" ] + - apiGroups: [ "autoscaling" ] + resources: [ "*" ] + verbs: [ "get", "list" ] + - apiGroups: [ "flagger.app" ] + resources: [ "canaries", "metrictemplates"] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "helm.toolkit.fluxcd.io" ] + resources: [ "helmreleases" ] + verbs: [ "get", "list" ] + - apiGroups: [ "kustomize.toolkit.fluxcd.io" ] + resources: [ "kustomizations" ] + verbs: [ "get", "list" ] + - apiGroups: [ "source.toolkit.fluxcd.io" ] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-canaries +subjects: +- kind: User + name: wego-admin # User logged in management cluster, impersonated via service account + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: canary-reader + apiGroup: rbac.authorization.k8s.io +``` + +
+ +You may need to add more users/groups to the `read-canaries` ClusterRoleBinding to ensure additional users can view canary information from within the Weave GitOps Enterprise UI. diff --git a/website/versioned_docs/version-0.24.0/explorer/configuration.mdx b/website/versioned_docs/version-0.24.0/explorer/configuration.mdx new file mode 100644 index 0000000000..468d9fd9a1 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/explorer/configuration.mdx @@ -0,0 +1,199 @@ +--- +title: Configuration +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Configuration + + + +This page helps you to understand the options available to configure Explorer + +## Prerequisites +Before using Explorer, please ensure that: +- You have Weave Gitops Enterprise [v0.23.0](../../releases) + +## Setup + +The following configuration options are available for you to setup Explorer. + +- `.spec.values.enableExplorer`: feature flag to control whether Explorer is enabled. +- `.spec.values.useQueryServiceBackend`: feature flag to control whether you want to leverage Explorer backend capabilities for +other UI experiences like [Applications](../../getting-started/ui#applications-view) or [Sources](../../getting-started/ui#the-sources-view) +- `.spec.values.explorer.collector.serviceAccount`: ServiceAccount `name` and `namespace` that explorer collector will use to impersonate +in leaf clusters. Make sure you read [authz for collector](#Authentication_and_Authorization_for_collecting) before setting it. Default +values are `name: collector`, `namespace: flux-system`. + +You should specify them in your HelmRelease values: + +```yaml +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops-enterprise + namespace: flux-system +spec: + # ... other spec components + values: + enableExplorer: true # feature flag to enable explorer + useQueryServiceBackend: true # uses explorer query backend in collection UIs + explorer: + collector: + serviceAccount: # service account that collector will impersonate in leaf clusters + name: collector + namespace: flux-system +``` + +## Configuration + +### Clusters + +Explorer watches the [GitopsClusters](https://docs.gitops.weave.works/docs/next/cluster-management/managing-existing-clusters/#connect-a-cluster) +that you have connected to Weave Gitops Enterprise, as well as your Management cluster. + +### Kinds + +Explorer watches for the following kind resources out of the box: + +[Flux GitOps Toolkit](https://fluxcd.io/flux/components/) + +- [HelmRelease](https://fluxcd.io/flux/components/helm/helmreleases/) +- [Kustomizations](https://fluxcd.io/flux/components/kustomize/kustomization/) +- [Sources](https://fluxcd.io/flux/components/source/) + - [GitRepostiories](https://fluxcd.io/flux/components/source/gitrepositories/) + - [OciRepositories](https://fluxcd.io/flux/components/source/ocirepositories/) + - [HelmRepositories](https://fluxcd.io/flux/components/source/helmrepositories/) + - [HelmCharts](https://fluxcd.io/flux/components/source/helmcharts/) + - [Buckets](https://fluxcd.io/flux/components/source/buckets/) + +## Data Layer + +Explorer take a simple approach to manage resource views. It leverages a Data Store for caching the views and query them. +The storage lifecycle is bounded to Weave Gitops Enterprise app and does not provide persistence guarantees. +Instead, it requests data as required to the leaf clusters. 
In its simplest form, the data store used is [SQLite](https://sqlite.org/index.html). + +## Authentication and Authorization + +There are two main paths to consider within Explorer in the context of authentication and authorization (authN/authZ): + +1. The read or querying path is exercised when a weave gitops user queries the data. +2. The write or collecting path is exercised to gather the resources from the leaf clusters. + +We look into them separately. + +## Authentication and Authorization for querying + +Explorer leverages existing authentication and authorization built-in the [application](https://docs.gitops.weave.works/docs/configuration/securing-access-to-the-dashboard/). +It identifies for a user logged in the application: its identity and the access permissions via Kuberentes RBAC. +Query results are filtered honouring the access determined via RBAC. + +## Authentication and Authorization for collecting + +[GitopsClusters](../cluster-management/managing-existing-clusters.mdx#connect-a-cluster) +define the connection and security context that Explorer leverages to collect data from leaf clusters. Given that you have followed the indications +in [setup RBAC](../configuration/service-account-permissions.mdx), the GitopsCluster service account is able to impersonate any user or group. + +:::tip + +Collector RBAC resources are part of your leaf clusters common RBAC configuration. It is commonly +located in your `clusters/bases` folder, as described in [Getting started](./getting-started.mdx). + +::: + + +To configure collection, you would need to extend this configuration with the following: + +1. Create a ServiceAccount for the one that you specified in your [setup](#setup) `.spec.values.explorer.collector.serviceAccount`. + +
Expand to see an example + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: collector # should match .spec.values.explorer.collector.serviceAccount.name + namespace: flux-system # should match .spec.values.explorer.collector.serviceAccount.namespace +``` + +
+ + +2. Create a ClusterRole with the permissions to watch the supported resources. + +
Expand to see an example + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: collector # could be .spec.values.explorer.collector.serviceAccount.name +rules: + - apiGroups: [ "rbac.authorization.k8s.io" ] + resources: [ "roles", "clusterroles", "rolebindings", "clusterrolebindings" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "kustomize.toolkit.fluxcd.io" ] + resources: [ "kustomizations" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "helm.toolkit.fluxcd.io" ] + resources: [ "helmreleases" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "source.toolkit.fluxcd.io" ] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "list", "watch" ] +``` + +
+ +3. Create a ClusterRolebinding to assign previous ClusterRole to the created collector `ServiceAccount`. + +
Expand to see an example + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: collector # could be .spec.values.explorer.collector.serviceAccount.name +subjects: + - kind: ServiceAccount + name: collector # should match .spec.values.explorer.collector.serviceAccount.name + namespace: flux-system # should match .spec.values.explorer.collector.serviceAccount.namespace +roleRef: + kind: ClusterRole + name: collector # name of the cluster role created earlier + apiGroup: rbac.authorization.k8s.io +``` + +
+ +If you want the collector to watch a particular namespace use a RoleBinding instead. + +4. Extend impersonation rules to allow service account impersonation for ServiceAccount `collector` + +
Expand to see an example + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: clusters-service-impersonator-role +rules: + - apiGroups: [""] + resources: ["users", "groups"] + verbs: ["impersonate"] + - apiGroups: [ "" ] + resources: [ "serviceaccounts" ] + verbs: [ "impersonate" ] + resourceNames: + - "collector" # should match .spec.values.explorer.collector.serviceAccount.name +``` +
+ +## Next Steps +- See [querying](./querying.mdx) to deep dive in how to query. +- See [operations](./operations.mdx) for day troubleshooting and operations. + + diff --git a/website/versioned_docs/version-0.24.0/explorer/getting-started.mdx b/website/versioned_docs/version-0.24.0/explorer/getting-started.mdx new file mode 100644 index 0000000000..a9ade1d8db --- /dev/null +++ b/website/versioned_docs/version-0.24.0/explorer/getting-started.mdx @@ -0,0 +1,86 @@ +--- +title: Getting started +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Getting started + + + +This guide shows you the basics steps to start using Explorer. + +## Pre-requisites + +Before using Explorer, please ensure that: + +- You have Weave Gitops Enterprise [v0.21.2](../../releases) +- You have deployed an application. + +## Setup + +Explorer is enabled via configuration through the feature flag `enableExplorer` that you could +configure in your Weave Gitops Enterprise HelmRelease values: + + +```yaml +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops-enterprise + namespace: flux-system +spec: + # ... other spec components + values: + enableExplorer: true +``` + +For a complete overview on the configuration you could see [configuration](./configuration.mdx). + +## Explorer UI + +Login to Weave Gitops and Explorer will be shown in the navigation menu `Explorer`. + +Explorer UI looks as follows: + +![explorer](imgs/explorer-ui.png) + +It has two main components: + +- A search dialog with filter to querying the platform resources +- A table with the filtered resources. + +For a more detailed view on the UI you could see [querying](./querying.mdx). + +## Discovering Applications + +For this example we are using the application `podinfo` but you could use any other app that suits your scenario. + +There are two main ways to discover data: + +- Querying via the search dialog. +- Filtering via the pre-built filter list. + +### Querying + +Just typing `podinfo` in the search dialog returns the applications that matches the term + +![podinfo app found](imgs/getting-started-querying-app.png) + +For more info on how querying works to query and advanced queries see [querying](./querying.mdx). + +### Filtering + +A set of queries are set as filters. For example, just select the filter `Failed` to get all the apps that are failing in +a moment in time. + +![failed apps](imgs/getting-started-failed.png) + +## Next Steps +Once you have an initial experience using explorer, you could + +- Deep dive in [querying](./querying.mdx) to see how it works and advanced queries. +- See [configuration](./configuration.mdx) for the options that you could configure in explorer. 
\ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/debug-access-rules.png b/website/versioned_docs/version-0.24.0/explorer/imgs/debug-access-rules.png new file mode 100644 index 0000000000..729465398a Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/debug-access-rules.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-and.png b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-and.png new file mode 100644 index 0000000000..484f85eae6 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-and.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-filter-cluster.png b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-filter-cluster.png new file mode 100644 index 0000000000..0b67b8f36b Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-filter-cluster.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-filter-kind.png b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-filter-kind.png new file mode 100644 index 0000000000..2e70a83d66 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-filter-kind.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-overview.png b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-overview.png new file mode 100644 index 0000000000..ed99a33135 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-query-overview.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-ui.png b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-ui.png new file mode 100644 index 0000000000..d2f1b32d0b Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/explorer-ui.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/getting-started-failed.png b/website/versioned_docs/version-0.24.0/explorer/imgs/getting-started-failed.png new file mode 100644 index 0000000000..e106373bdc Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/getting-started-failed.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/imgs/getting-started-querying-app.png b/website/versioned_docs/version-0.24.0/explorer/imgs/getting-started-querying-app.png new file mode 100644 index 0000000000..4e6e41efbf Binary files /dev/null and b/website/versioned_docs/version-0.24.0/explorer/imgs/getting-started-querying-app.png differ diff --git a/website/versioned_docs/version-0.24.0/explorer/intro.mdx b/website/versioned_docs/version-0.24.0/explorer/intro.mdx new file mode 100644 index 0000000000..7b694d68cf --- /dev/null +++ b/website/versioned_docs/version-0.24.0/explorer/intro.mdx @@ -0,0 +1,42 @@ +--- +title: Introduction +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Explorer + + + +As platform engineer or as developer, your applications and platform services will likely span multiple kubernetes clusters +or infrastructure components. In order to manage and operate them you require a platform capability that +allows you to discover the resources from a single place. 
+ +Explorer is that capability that allows any platform user to discover platform resources from a single place +across all your kubernetes clusters. + +![explorer](imgs/explorer-ui.png) + +## FAQ + +### Which journeys would be able to use explorer for? + +Explorer is better suited for journeys matching the discovery of resources across the platform resources inventory. + +### Which journeys would be better using other weave gitops capabilities for? + +If you have a particular resources you want to manage, weave gitops offers single resource experience +for almost every resource. + +### Which Kinds does explorer support? + +Explorer support all Flux Applications and Sources CRDs + +See [Supported Kinds](../configuration#kinds) for more details. + +## Next Steps + +Now that you know what Explorer is, follow [getting started](../getting-started) to quickly have a feeling +of what Explorer can do for you. \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/explorer/operations.mdx b/website/versioned_docs/version-0.24.0/explorer/operations.mdx new file mode 100644 index 0000000000..3a3b85f6a9 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/explorer/operations.mdx @@ -0,0 +1,25 @@ +--- +title: Operations +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Operations + + + +As platform engineer you could need to have a finer understanding on the underlying logic for Explorer. The following +options are available to you to operate and troubleshoot it. + +## Debug Access Rules + +It is a debugging tool to make visible explorer authorization logic. You could find it as tab `Access Rules` alongside +the `Query` tab. + +![access rules](imgs/debug-access-rules.png) + +You could discover by `Cluster` and `Subject` the `Kinds` it is allowed to read. These are the rules that +will be the source of truth doing authorization when a user does a query. + diff --git a/website/versioned_docs/version-0.24.0/explorer/querying.mdx b/website/versioned_docs/version-0.24.0/explorer/querying.mdx new file mode 100644 index 0000000000..7d141224ed --- /dev/null +++ b/website/versioned_docs/version-0.24.0/explorer/querying.mdx @@ -0,0 +1,142 @@ +--- +title: Querying +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Querying + + + +Explorer recommended way to discover resources is via its search dialog. This guide provides the background to understand +it and set how to use it. + +## Schema + +Every resource is normalised to the following common schema: + +| __Key__ | __Description__ | +| ----------------- | -------------- | +| Cluster | Name of cluster where the resource exists. As gitops cluster ``| +| Namespace | Namespace name where the resource exists.| +| Kind | Resource kubernetes type or [kind](https://kubernetes.io/docs/reference/using-api/api-concepts/#standard-api-terminology)| +| Name | Resource name as specified in its manifest.| +| Status | Resource health status. Indicates the status of its reconciliation.| +| Message | Resource health status message. 
It extends status field with information about the status.| + +For a `podinfo` helm release from a cluster `default/progress-delivery-demo2-32` like this: + +```yaml +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: podinfo + namespace: flux-system +spec: + chart: + spec: + chart: podinfo + interval: 1m + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: 6.0.0 + interval: 1m +status: + conditions: + - message: Release reconciliation succeeded + reason: ReconciliationSucceeded + status: "True" + type: Ready +``` + +The schema looks like + +| Cluster | Namespace | Kind | Name | Status | Message | +|------------| ---------| ----------------|---------|----------|------------------------| +|`default/progress-delivery-demo2-32` | `flux-system` | `HelmRelease` | `podinfo` | `Success` | `Release reconciliation succeeded` | + +And represented in the UI like + +![explorer](imgs/explorer-query-overview.png) + +## Querying + +Based on the previous schema you have two main modes to discover or filter the data + +1. Filter by key and value: when you know the pair that you want to filter from. +2. Filter by value: when you want to filter across keys or does not know the key + +:::info Matching + +For `key:value` filter the results would be those results where `key=value` exactly. +::: + + +### Filter by key and value + +You search with the format `key:value` indicating that you want to filter the resources matching the key and value. +A couple of examples could be: + +- `kind:HelmRelease` to filter all helm releases. +- `status:Failed` to find all failing resources. + +### Filter by value +You search with a single term `value` indicating that you want to filter across the supported keys by values. +The value would be search cross keys: `name`, `namespace` and `cluster` as synthatic sugar for `name:value` OR `namespace:value` OR `cluster:value`. + +An example cold be using `podinfo` that would translate into `name:podinfo` OR `namespace:podinfo` OR `cluster:podinfo`. + +### Operators + +#### AND + +You could refine filtered results by using AND semantics adding different filters. + +For example if you want to have all the resources within `flux-system` namespace from `management` cluster you could +create a query like the following: + +![and query](imgs/explorer-query-and.png) + +with two sequential filters `namespace:flux-system` and `cluster:management` to achieve it. + +## FAQ + +Here a set of questions around querying to allow you get started by practice. + +### How can I discover resources from a cluster? + +You could use the single term with `clusterName` or `cluster:clusterName` + +For example for management cluster could be `management` or `cluster:management` + +![filter by cluster](imgs/explorer-query-filter-cluster.png) + +### How can I discover resources from a namespace? + +You could use the single term with `namespaceName` or `namespace:namespaceName`. + +### How can I discover applications? + +You should filter by kind where kind could be either HelmRelease or Kustomization like `kind:Kustomization` or `kind:HelmRelease`. +You could also use the pre-built filters. + +![filter by kind](imgs/explorer-query-filter-kind.png) + +:::warning Exact Matching + +Remember that `key:value` is `key=value` +- `kind:HelmRelease` returns all Helm Releases apps +- `kind:helmrelease` wont return anything +::: + +### How can I discover failed applications? 
+ +You should filter by `status:Failed` to find the failing apps or use the pre-built filters. + +### How can I discover resources by name? + +You could use the single term with `resourceName` or `name:resourceName`. diff --git a/website/versioned_docs/version-0.24.0/getting-started/deploy.mdx b/website/versioned_docs/version-0.24.0/getting-started/deploy.mdx new file mode 100644 index 0000000000..d4d4e6ac6f --- /dev/null +++ b/website/versioned_docs/version-0.24.0/getting-started/deploy.mdx @@ -0,0 +1,146 @@ +--- +title: 2. Deploy an Application +hide_title: true +--- + +# Part 2: Deploy an Application + +Now that you have a feel for how to navigate the dashboard, let's deploy a new +application to explore further. In this section we will use the [podinfo](https://github.com/stefanprodan/podinfo) sample web application. + +## Deploying podinfo + +1. Clone or navigate back to your git repository where you have bootstrapped Flux, for example: + + ``` + git clone https://github.com/$GITHUB_USER/fleet-infra + cd fleet-infra + ``` + +1. Create a `GitRepository` Source for podinfo + + ``` + flux create source git podinfo \ + --url=https://github.com/stefanprodan/podinfo \ + --branch=master \ + --interval=30s \ + --export > ./clusters/my-cluster/podinfo-source.yaml + ``` + +1. Commit and push the `podinfo-source` to the `fleet-infra` repository + + ``` + git add -A && git commit -m "Add podinfo source" + git push + ``` + +1. Create a `kustomization` to build and apply the podinfo manifest + + ``` + flux create kustomization podinfo \ + --target-namespace=flux-system \ + --source=podinfo \ + --path="./kustomize" \ + --prune=true \ + --interval=5m \ + --export > ./clusters/my-cluster/podinfo-kustomization.yaml + ``` + +1. Commit and push the `podinfo-kustomization` to the `fleet-infra` repository + + ``` + git add -A && git commit -m "Add podinfo kustomization" + git push + ``` + +## View the application in Weave GitOps + +Flux will detect the updated `fleet-infra` and add podinfo. If we navigate back to the [dashboard](http://localhost:9001/) you should see the podinfo application appear. + +![Applications summary view showing Flux System, Weave GitOps and Podinfo](/img/dashboard-applications-with-podinfo.png) + +Click on podinfo and you will see details about the deployment, including that there are 2 pods available. + +![Applications details view for podinfo showing 2 pods](/img/dashboard-podinfo-details.png) + +:::info +Podinfo will come with the HorizontalPodAutoscaler, which uses the `metrics-server`. +For the purposes of this tutorial, we don't need the `metrics-server`, but it will mean +that the HorizontalPodAutoscaler will report as `Not ready` in your Dashboard. + +To remove this warning, install the [`metrics-server`](https://github.com/kubernetes-sigs/metrics-server) +(if you are using a `kind` cluster you may need to do more to get this working), +but you can ingore the warning if you prefer. +::: + +## Customize podinfo + +To customize a deployment from a repository you don’t control, you can use Flux in-line patches. The following example shows how to use in-line patches to change the podinfo deployment. + +1. Add the `patches` section as shown below to the field spec of your `podinfo-kustomization.yaml` file so it looks like this: + +
Expand to see Kustomization patches + + ```yaml title="./clusters/my-cluster/podinfo-kustomization.yaml" + --- + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: podinfo + namespace: flux-system + spec: + interval: 60m0s + path: ./kustomize + prune: true + sourceRef: + kind: GitRepository + name: podinfo + targetNamespace: flux-system + // highlight-start + patches: + - patch: |- + apiVersion: autoscaling/v2beta2 + kind: HorizontalPodAutoscaler + metadata: + name: podinfo + spec: + minReplicas: 3 + target: + name: podinfo + kind: HorizontalPodAutoscaler + // highlight-end + ``` + +
+ +1. Commit and push the `podinfo-kustomization.yaml` changes: + + ``` + git add -A && git commit -m "Increase podinfo minimum replicas" + git push + ``` + +3. Navigate back to the dashboard and you will now see a newly created pod + + ![Applications details view for podinfo showing 3 pods](/img/dashboard-podinfo-updated.png) + + +## Suspend updates + +Suspending updates to a kustomization allows you to directly edit objects applied from a kustomization, without your changes being reverted by the state in Git. + +To suspend updates for a kustomization, from the details page, click on the suspend button at the top, and you should see it be suspended: + +![Podinfo details showing Podinfo suspended](/img/dashboard-podinfo-details-suspended.png) + +This shows in the applications view with a yellow warning status indicating it is now suspended + +![Applications summary view showing Podinfo suspended](/img/dashboard-podinfo-suspended.png) + +To resume updates, go back to the details page, click the resume button, and after a few seconds reconsolidation will continue. + +## Complete! + +Congratulations 🎉🎉🎉 + +You've now completed the getting started guide. We would welcome any and all [feedback](/feedback-and-telemetry) on your experience. diff --git a/website/versioned_docs/version-0.24.0/getting-started/intro.mdx b/website/versioned_docs/version-0.24.0/getting-started/intro.mdx new file mode 100644 index 0000000000..4dc27c8d3a --- /dev/null +++ b/website/versioned_docs/version-0.24.0/getting-started/intro.mdx @@ -0,0 +1,16 @@ +--- +title: Getting Started +hide_title: true +--- + +# Getting Started with Weave GitOps + +This hands-on guide will introduce you to the basics of the GitOps Dashboard web UI, to help you understand the state of your system, before deploying a new application to your cluster. It is adapted from this guide - [Flux - Getting Started](https://fluxcd.io/docs/get-started/). + +If you haven't already, be sure to check out our [introduction](../intro.mdx) to Weave GitOps and our [installation docs](../installation/index.mdx). + +## TL;DR: Highlights + +- **Applications view** - allows you to quickly understand the state of your deployments across a cluster at a glance. It shows summary information from `kustomization` and `helmrelease` objects. +- **Sources view** - shows the status of resources which are synchronizing content from where you have declared the desired state of your system, for example Git repositories. This shows summary information from `gitrepository`, `helmrepository` and `bucket` objects. +- **Flux Runtime view** - provides status on the GitOps engine continuously reconciling your desired and live state. It shows your installed GitOps Toolkit Controllers and their version. diff --git a/website/versioned_docs/version-0.24.0/getting-started/ui.mdx b/website/versioned_docs/version-0.24.0/getting-started/ui.mdx new file mode 100644 index 0000000000..25bcb60815 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/getting-started/ui.mdx @@ -0,0 +1,112 @@ +--- +title: 1. Explore the GitOps UI +hide_title: true +--- + +# Part 1: Explore the GitOps UI + +Weave GitOps provides insights into your application deployments, and makes continuous delivery with GitOps easier to adopt and scale across your teams. We will now login to the dashboard web UI and start to explore the state of our GitOps deployments. + +## Login to the GitOps Dashboard + +1. 
Expose the service running on the cluster:

   ```
   kubectl port-forward svc/ww-gitops-weave-gitops -n flux-system 9001:9001
   ```

1. [Open the dashboard](http://localhost:9001/) and login using either the [emergency cluster user](../configuration/emergency-user.mdx)
   or OIDC, based on your [configuration](../configuration/securing-access-to-the-dashboard.mdx).
   If you followed the example above, the emergency user will be configured with the username set to `admin`.
   The password is the non-encrypted value you provided as `$PASSWORD`.

   ![Weave GitOps login screen](/img/dashboard-login.png)

## Applications view

When you log in to the dashboard you are brought to the Applications view, which allows you to quickly understand the state of your deployments across a cluster at a glance. It shows summary information from `kustomization` and `helmrelease` objects.

![Applications summary view showing Flux System and Weave GitOps deployments](/img/dashboard-applications-overview.png)

In the above screenshot you can see:
- a `Kustomization` called `flux-system`, which was created when Flux was bootstrapped onto the cluster, and is deploying the GitOps Toolkit controllers. It is also deploying further Flux objects defined in the same repo, so that Flux will deploy additional workloads, which includes our Helm Chart for Weave GitOps.
- a `HelmRelease` called `ww-gitops`, which deploys the aforementioned Helm Chart.

This table view shows the reported status so you can understand whether a reconciliation has been successful, and when the objects were last updated. You can also see where the Flux objects are deployed and which `Source` object they are reconciling from; clicking the name of the Source will take you to a detail view for the given source object. The view automatically updates every few seconds so you know the current state of your system.

You can search for and filter objects by `Name` by clicking the magnifying glass, or filter by `Type` by clicking the strawberry icon to its right.

Clicking the Name of an object will take you to a detailed view for the given Kustomization or HelmRelease, which we will explore in a moment.

## The Sources view

Clicking on Sources in the left-hand menu will bring you to the Sources view. This view shows you where Flux pulls its application definitions from, for example Git repositories, and the current state of that synchronization. It shows summary information from `gitrepository`, `helmrepository`, `helmchart` and `bucket` objects.

![Sources summary view showing Flux System and Weave GitOps sources](/img/dashboard-sources.png)

In the above screenshot you can see:
- a `GitRepository` called `flux-system`, which was created when Flux was bootstrapped onto the cluster, and contains the manifests for the GitOps Toolkit, Weave GitOps and various Flux objects.
- a `HelmChart` called `flux-system-ww-gitops`, which is automatically created by Flux when you define a `HelmRelease` to deploy a Helm Chart from a given source.
- a `HelmRepository` called `ww-gitops`, which pulls from the Helm Repository where the Weave GitOps Helm Chart is published.

The table view again shows summary status information so you can see whether Flux has been able to successfully pull from a given source and which specific commit was last detected. It shows key information like the `Interval`, namely how frequently Flux will check for updates in a given source location.
You can apply filtering as in the Applications view, click the `URL` to navigate to a given source (i.e. a repository in GitHub), or click the `Name` of a `Source` to view more details about it.

## The Flux Runtime view

Clicking on `Flux Runtime` provides information on the GitOps engine, which is continuously reconciling your desired and live state. It provides two different tabs: controllers and CRDs.

### Controllers

The controllers tab shows your installed GitOps Toolkit Controllers and their version.

![Flux Runtime view showing the various GitOps Toolkit controllers](/img/dashboard-flux-runtime.png)

By default `flux bootstrap` will install the following controllers:
- helm-controller
- kustomize-controller
- notification-controller
- source-controller

For a full description of the controllers, see [GitOps ToolKit components](https://fluxcd.io/docs/components/) in the Flux documentation.

From this view you can see whether the controllers are healthy and which version of a given component is currently deployed.

### CRDs

The CRD tab lists the custom resources that the GitOps Toolkit Controllers use. This helps you see what resources you will be able to create.

![Flux Runtime view showing the various GitOps Toolkit CRDs](/img/dashboard-flux-runtime-crd.png)

### Exploring the flux-system deployment

Let's explore the `flux-system` kustomization. Navigate back to the `Applications` view and click on the `flux-system` object.

![Application detail view for the flux system kustomization](/img/dashboard-application-flux.png)

After a few moments loading the data, you should see something similar to the above screenshot. From here you can see key information about how this resource is defined: which `Source` it is reading from, the latest applied commit, the exact path within the Source repository that is being deployed, and the `Interval` at which Flux will look to reconcile any difference between the declared and live state - i.e. if a `kubectl` patch had been applied on the cluster, it would effectively be reverted. If a longer error message was being reported by this object, you would be able to see it in its entirety on this page.

Underneath the summary information are five tabs:

- Details (default) is a table view which shows all the Kubernetes objects (including Flux objects, deployments, pods, services, etc.) managed and deployed through this `kustomization`.
- Events (shown below) shows any related Kubernetes events to help you diagnose issues and understand health over time.
- Reconciliation Graph (shown below) provides a directional graph alternative to the Details view to help you understand how the various objects relate to each other.
- Dependencies provides a directional graph that helps you understand dependencies between objects, if there are any. This helps you make sure that your automations are set up in the correct order.
- Yaml (shown below) provides a raw dump of the current object as it currently exists inside your cluster. Note that this will be different from what's in your GitOps repository, since this yaml view will contain the current status of the object.
+ +**Events tab** +![Application detail view showing events for an object](/img/dashboard-application-events.png) + +**Reconciliation Graph tab** +![Application detail view showing reconciliation graph - a directional graph showing object relationships](/img/dashboard-application-reconciliation.png) + +**Yaml tab** +![Application detail view showing the yaml display](/img/dashboard-application-yaml.png) + +### Source details view +Finally let's look at the Source in more detail - go back to the Details tab, and click `GitRepository/flux-system` from the summary at the top of the page. + +![Source detail view showing details for an object](/img/dashboard-source-flux.png) + +As with an Application detail view, you can see key information about how the resource is defined. + +Now we are familiar with the Dashboard, let's deploy a new application :sparkles:. diff --git a/website/versioned_docs/version-0.24.0/gitops-run/get-started.mdx b/website/versioned_docs/version-0.24.0/gitops-run/get-started.mdx new file mode 100644 index 0000000000..3d7afb8b10 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitops-run/get-started.mdx @@ -0,0 +1,306 @@ +--- +title: Tutorial +hide_title: true +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Get Started with GitOps Run + +GitOps Run supports two different modes of operation - directly on a +cluster or as sandboxed sessions. The sandboxed sessions are intended +for shared environments where multiple users are running their own +sessions, whereas the direct mode is intended for a local cluster. + +In this tutorial we are going to use 'direct mode' to run GitOps on a local +cluster. + + +## Prerequisites +### Required +- Install the GitOps CLI. See [the installation](../installation/weave-gitops.mdx#gitops-cli) + +### Optional +- This guide uses [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) for demonstrations, but it is not required to use GitOps Run +- The [Flux CLI](https://fluxcd.io/flux/installation/) is the quickest way to generate resource definitions, but the files can also be created manually + +## Create a local Kubernetes cluster + +To get started with GitOps Run, you need a Kubernetes cluster. There +are many tools to set up a local cluster for test and development +purposes. + +:::note +This tutorial assumes you have full control of your cluster - we +recommend a local cluster, but you can also use a remote cluster where +you have full `cluster-admin` privileges. +::: + + + + +Install [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) and run + +```bash +kind create cluster +``` + + + +Install [k3d](https://k3d.io/) and run + +```bash +k3d cluster create mycluster +``` + + + +Install [minikube](https://minikube.sigs.k8s.io/docs/start/) and run + +```bash +minikube start +``` + + + +Install [Docker Desktop](https://www.docker.com/products/docker-desktop/) and enable Kubernetes. Then run + +``` +kubectl config set-context docker-desktop +``` + + + +GitOps Run works on any Kubernetes platform, but to avoid accidents +you have to explicitly white-list the context name. 
First, find the name of the context where you want to run `gitops beta run` - in this example, there's a cluster with the name "dev":

```bash
$ kubectl config get-contexts
CURRENT   NAME   CLUSTER   AUTHINFO   NAMESPACE
*         dev    dev       dev
```

Then, for any `gitops beta run` command in this guide, you'll have to add the flag `--allow-k8s-context=dev`.

Before you continue, make sure `kubectl get nodes` returns a node which is `Ready`.

## Create a GitOps repository

You need to set up a Git repository to put your GitOps manifests in. Any Git repository will do; for example, create a new [github](https://github.com/new) repository and clone that.

You may alternatively fork an existing repository, as we have done for this guide. Head to [podinfo](https://github.com/stefanprodan/podinfo) and create a fork with the name `podinfo-gitops-run`.

## Set up GitOps Run

To start GitOps Run, clone your newly created repository or fork and change into it.

We will run the command with `--no-session` as it's a single-user cluster which we want to use in direct mode. The port-forward points at the `podinfo` service we will create later on.

```bash
export GITHUB_USER=<your-github-username>

# you can ignore these two commands if you already created and cloned your repository
git clone git@github.com:$GITHUB_USER/podinfo-gitops-run.git

cd podinfo-gitops-run
gitops beta run ./podinfo --no-session --port-forward namespace=dev,resource=svc/dev-podinfo,port=9898:9898
```

You will now be asked if you want to install Flux and the GitOps [dashboard](../getting-started/intro.mdx). Answer `yes` and **set a password**.

:::tip
If you do not set a password, you won't be able to log in to the GitOps UI :scream:.
:::

Shortly after, you should be able to [open the dashboard](http://localhost:9001). The username is `admin` and the password will be the one you set above.

In your dashboard you will be able to see what is in your cluster, including the resources that GitOps Run is operating.

## Start modifying your deployment

In your local GitOps repo, you will see that GitOps Run has created a new directory called `podinfo`. Inside there is a single, mostly empty, `kustomization.yaml`.

To create the automation for the `podinfo` app, we first have to add the resources to run it - we'll create a new `Namespace`, a `HelmRepository` that references the Helm repository where the chart is stored, and a `HelmRelease` that references the chart and version. We can use the `flux` CLI to generate the resource definitions, or we can just create the yaml files ourselves.

```bash
cat <<EOF > ./podinfo/namespace.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: dev
EOF
flux create source helm podinfo --url=https://stefanprodan.github.io/podinfo --namespace=dev --export > ./podinfo/podinfo-source.yaml
flux create helmrelease podinfo --source=HelmRepository/podinfo --chart=podinfo --export --namespace=dev --target-namespace=dev > ./podinfo/podinfo-helmrelease.yaml
```

You should see three files now exist in your `./podinfo` directory.

Alternatively, if you prefer to create the files manually, save the contents of the following files to the `./podinfo` directory.
./podinfo/namespace.yaml + +```yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dev +``` + +
+ +
./podinfo/podinfo-source.yaml + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo + namespace: dev +spec: + interval: 1m0s + url: https://stefanprodan.github.io/podinfo +``` + +
+ +
./podinfo/podinfo-helmrelease.yaml + +```yaml +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: podinfo + namespace: dev +spec: + chart: + spec: + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + interval: 1m0s + targetNamespace: dev +``` + +
+ +
+
+ +The only remaining step is to import these files in the auto-generated +`kustomization.yaml`. Open it up, and you should see the following: + +```yaml title="./podinfo/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] # 👋 Start adding the resources you want to sync here +``` + +Change the last line so it instead looks like the following: + +```yaml title="./podinfo/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +// highlight-start +resources: + - namespace.yaml + - podinfo-source.yaml + - podinfo-helmrelease.yaml +// highlight-end +``` + +GitOps Run should now automatically upload these manifests and install +them. The dashboard should show you how the resources are being +reconciled, and when they're Ready you will be able to see podinfo +[here](http://localhost:9898). + + +## Update your app + +Now that GitOps Run is continuously watching and reconciling your +local files onto your cluster, we can start modifying the resources. + +We're going to be modifying the `podinfo` we set up in the previous +step. Open the current [podinfo](http://localhost:9898) and pay +attention to the background color. + +Now, open your HelmRelease file and add the values at the bottom, as +indicated: + +```yaml title="./podinfo/podinfo-helmrelease.yaml" +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: podinfo + namespace: dev +spec: + chart: + spec: + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + interval: 1m0s + targetNamespace: dev +// highlight-start + values: + ui: + color: "#C32148" +// highlight-end +``` + +When you hit save, you'll see GitOps Run upload new files, and once +it's reconciled the `podinfo` background will have been changed to a bright red. + +## Next steps: GitOps Mode + +Now that we've used this interactive environment to set up the +resources we want, we can switch over to full GitOps mode, where Flux +is permanently pulling from your remote Git repository. + +Hit `ctrl-c` to stop GitOps Run. It will ask you whether you want to bootstrap +your cluster into full GitOps mode. If you answer yes, it +will take you through a wizard to help you set this up. You'll need information +such as the remote repository, the branch name, etc. + +When you hit submit, it will set up the repository and branch, add +Flux manifests, as well as the files you were just working on. From +this point on, you can make persistent changes by pushing them to this +repository. diff --git a/website/versioned_docs/version-0.24.0/gitops-run/overview.mdx b/website/versioned_docs/version-0.24.0/gitops-run/overview.mdx new file mode 100644 index 0000000000..e1e855d48e --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitops-run/overview.mdx @@ -0,0 +1,74 @@ +--- +title: Overview +hide_title: true +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# GitOps Run Overview + +## Introduction + +GitOps is a powerful mechanism for creating consistent environments and having +multiple clusters stay in sync. If you build out your infrastructure correctly +you get predictable behaviours for your teams and you can have new environments +up and running quickly. + +However, GitOps can be challenging for the everyday developer +to work with and it can create some friction, especially for developers who are +less familiar with Kubernetes or Flux. 
The purpose of GitOps Run is to remove the complexity for developers so that platform operators can create developer environments easily, and application developers can benefit from GitOps and focus on writing code.

Watch this video to learn more about how GitOps Run can help your team get started with GitOps:

### Additional Benefits

* No need to run `kubectl`, `helm`, `kustomize`, or `flux` CLI commands. Just create the manifests and we'll put them on the cluster for you.
* Reduces the cycle time when configuring your cluster. With normal GitOps there are a lot of commit/push/reconcile cycles, which can be frustrating. This skips that, and you can test your changes directly before committing and pushing code to your Git repository.
* Multiple options for debugging Flux, such as using the Dashboard that comes with Weave GitOps or getting live feedback by leveraging the [GitOps Tools for Flux](https://marketplace.visualstudio.com/items?itemName=Weaveworks.vscode-gitops-tools) VSCode extension.

## Terminology

### Modes

#### GitOps:
This is the default mode we are always aiming for when using Weave GitOps. Whenever GitOps Run is not active, we want users to be in this mode. This means that the cluster is being driven by some mechanism reading from Git, ideally Flux, and that system is applying those changes to the cluster.

#### Run:
This is when GitOps Run is active on the cluster. There is a live reload session occurring, and the cluster is no longer in a pure GitOps or Snowflake mode. Ideally, when GitOps Run stops, the cluster returns to the GitOps mode defined above.

#### Snowflake:
This refers to a cluster that is driven by some other mechanism outside of GitOps or Run. For example, a platform operator could have run various kubectl apply commands and installed a few helm charts using helm. The only way for the cluster to reach this state again is to rerun those commands or to transition to GitOps mode.

### Sessions

Weave GitOps Run has two different ways of interacting with your cluster.

#### Sandboxed

This means we spin up a virtual cluster on your cluster, creating a sandbox environment for your applications. Your application runs in an isolated environment and will not impact the rest of your cluster. When you are done and turn off GitOps Run, we clean up the virtual cluster and everything that was installed on it. You can push your changes to Git and then our system will take care of pulling those changes onto the cluster.

#### Cluster
When you pass the `--no-session` flag when starting GitOps Run, we do not put those workloads in their own sandboxed environment. We load them directly into the cluster, just as you would any other app.

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/annotations.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/annotations.mdx
new file mode 100644
index 0000000000..95a4890f02
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/annotations.mdx
@@ -0,0 +1,34 @@
---
title: Annotations
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Annotations

## The `add-common-bases` annotation

The `templates.weave.works/add-common-bases: "true"` annotation can be used to enable and disable the addition of a "common bases" `Kustomization` to the list of rendered files.
This kustomization will sync a path that is common to all clusters (`clusters/bases`).

An example use case would be to ensure that certain RBAC rules or policies are applied to all clusters using this template.

## The `inject-prune-annotation` annotation

The `templates.weave.works/inject-prune-annotation: "true"` annotation can be used to enable and disable the injection of Flux's `prune` annotation into certain resources.

When enabled, GitOps automatically injects a `kustomize.toolkit.fluxcd.io/prune: disabled` annotation into every resource in the `spec.resourcetemplates` that is **not** a `cluster.x-k8s.io.Cluster` and **not** a `gitops.weave.works.GitopsCluster`.

The intention here is to stop Flux from explicitly deleting subresources of the `Cluster`, like `AWSCluster`, `KubeadmControlPlane`, `AWSMachineTemplate` etc., and let the CAPI controllers handle their removal.

This is the pattern recommended in the capi-quickstart guide: https://cluster-api.sigs.k8s.io/user/quick-start.html#clean-up.

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/capd-example.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/capd-example.mdx
new file mode 100644
index 0000000000..7f87132918
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/capd-example.mdx
@@ -0,0 +1,21 @@
---
title: 'Example: CAPD Template'
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# CAPD Template Example

This full example works with the CAPD provider, see [Cluster API Providers](../cluster-management/cluster-api-providers.mdx).

import CodeBlock from "@theme/CodeBlock";
import CapdTemplate from "!!raw-loader!../assets/templates/capd-template.yaml";

{CapdTemplate}

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/cli.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/cli.mdx
new file mode 100644
index 0000000000..3c8676e6fe
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/cli.mdx
@@ -0,0 +1,110 @@
---
title: CLI
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Template CLI

The Enterprise `gitops` CLI tool provides a set of commands to help you manage your templates.

Here we're going to talk about the `gitops create template` command, which allows you to render templates locally and air-gapped, without a full WGE installation in a Kubernetes cluster.

## Use cases

- In CI/CD systems where you want to render a template and then use the raw output in a pipeline
- For quickly debugging templates

## Restrictions

The `gitops create template` command only works with `GitOpsTemplate` objects. It does not work with `CAPITemplate` objects. You should be able to migrate any `CAPITemplate` objects to `GitOpsTemplate` with some small tweaks.

:::info

GitOpsTemplate or CAPITemplate?

The only difference between `CAPITemplate` and `GitOpsTemplate` is the default value of these two annotations:

| Annotation | default value for `CAPITemplate` | default value for `GitOpsTemplate` |
| ----------- | ---------------- | ------------------ |
| `templates.weave.works/add-common-bases` | `"true"` | `"false"` |
| `templates.weave.works/inject-prune-annotations` | `"true"` | `"false"` |

:::

## Installation

See the Weave GitOps Enterprise [installation instructions](../installation/weave-gitops-enterprise/index.mdx#7-install-the-cli) for details on how to install the EE `gitops` CLI tool.
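Once installed, a quick way to confirm the binary is available on your `PATH` before rendering anything is to print its version (the exact output varies by release):

```bash
gitops version
```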
+ +## Getting started + +Using a local `GitOpsTemplate` manifest (such as this [CAPD +example](../capd-example)) with required parameters exported in the +environment, the command can render the template to one of the following: +1. The current kubecontext directly (default) +1. stdout with `--export` +1. The local file system with `--output-dir`, this will use the + `spec.resourcestemplates[].path` fields in the template to determine where to + write the rendered files. + This is the recommended approach for GitOps as you can then commit the + rendered files to your repository. + +```bash +gitops create template \ + --template-file capd-template.yaml \ + --output-dir ./clusters/ \ + --values CLUSTER_NAME=foo +``` + +## Profiles + +As in the UI you can add profiles to your template. However instead of reading +the latest version of a profile and its layers from a `HelmRepository` object +in the cluster, we instead read from your local helm cache. + +```bash +helm repo add weaveworks-charts https://raw.githubusercontent.com/weaveworks/weave-gitops-profile-examples/gh-pages +helm repo update +``` + +This particular helm repo provides a version of the `cert-manager` repo and others. + +### Supplying values to a profile + +You can supply a `values.yaml` file to a profile using the `values` parameter. +For example we can supply `cert-manager`'s `values.yaml` with: + +```bash +gitops create template \ + --template-file capd-template.yaml \ + --output-dir ./out \ + --values CLUSTER_NAME=foo \ + --profiles "name=cert-manager,namespace=foo,version=>0.1,values=cert-manager-values.yaml" +``` + +## Using a config file + +Instead of specifying the parameters on the command line you can supply a +config file. For example the above invocation can be replaced like so: + +```yaml title=config.yaml +template-file: capd-capi-template.yaml +output-dir: ./out +values: + - CLUSTER_NAME=foo +profiles: + - name=cert-manager,namespace=foo,version=>0.1,values=cert-manager-values.yaml +``` + +and executed with: + +```bash +gitops create template --config config.yaml +``` diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/create-cluster-example.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/create-cluster-example.mdx new file mode 100644 index 0000000000..b0d67e1151 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitops-templates/create-cluster-example.mdx @@ -0,0 +1,33 @@ +--- +title: 'Example: Template to Create a CAPI Cluster' +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# CAPI Cluster Template Example + +GitOps template objects need to be wrapped with the `GitOpsTemplate` custom +resource and then loaded into the management cluster. + +```yaml +apiVersion: templates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: cluster-template-development + labels: + weave.works/template-type: cluster +spec: + description: This is the std. CAPD template + renderType: templating + params: + - name: CLUSTER_NAME + description: This is used for the cluster naming. 
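  # Each parameter declared under `params` above is exposed to the resource
  # templates below and can be referenced as {{ .params.PARAM_NAME }},
  # e.g. {{ .params.CLUSTER_NAME }}.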
  resourcetemplates:
    - apiVersion: cluster.x-k8s.io/v1alpha3
      kind: Cluster
      metadata:
        name: "{{ .params.CLUSTER_NAME }}"
```

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/creating-templates.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/creating-templates.mdx
new file mode 100644
index 0000000000..5ce78ad37a
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/creating-templates.mdx
@@ -0,0 +1,128 @@
---
title: Creating Templates
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Creating GitOpsTemplates

:::tip

For complete examples of widely-used templates, see the [Quickstart guide](../quickstart-templates).

:::

GitOps Templates were originally introduced to enable self-service operations for the cluster creation workflow.

We have since extended this capability to cover Terraform, Crossplane and general Kubernetes resources.

An example template could, upon merging to a GitOps repository and reconciling in a cluster, provide a running developer environment consisting of an EKS cluster, an RDS database, and a branch and revision of the current application, all through a single template.

Templates can be loaded into the cluster by a Platform Operator by adding them to the Flux-managed GitOps repository for the target cluster. Alternatively, they can be applied directly to the cluster with `kubectl`.

:::info

Weave GitOps will search for templates in the `default` namespace. This can be changed by configuring the `config.capi.namespace` value in the Weave GitOps Enterprise Helm Chart.

:::

## Template Type

Template types are used by Weave GitOps to group the templates nicely in the Dashboard UI.

There are four recommended template types:
- `application` - for application templates
- `cluster` - for cluster templates
- `terraform` - for Terraform templates
- `pipeline` - for Pipeline templates

Declare this in the object manifest by using the `weave.works/template-type` label and setting the value to the name of the type.

```yaml {7-8}
---
apiVersion: templates.weave.works/v1alpha2
kind: GitOpsTemplate
metadata:
  name: example-template
  namespace: default
  labels:
    weave.works/template-type: pipeline
spec:
# ...
```

## Template Components

The rendering of certain component sections in a template can be enabled or disabled with annotations. The annotation keys are of the form `templates.weave.works/COMPONENT-enabled` and have `boolean` values.

Supported components:
- `profiles`
- `kustomizations`
- `credentials`

Example:

```yaml
annotations:
  templates.weave.works/profiles-enabled: "true"
  templates.weave.works/kustomizations-enabled: "false"
  templates.weave.works/credentials-enabled: "true"
```

## In-UI Template Editing

When rendering a template, a `templates.weave.works/create-request` annotation is added by default to the first resource in the `resourcetemplates`.

It can be added to any other resource by simply adding the annotation in empty form. This annotation holds information about which template generated the resource and the parameter values used, as a JSON string.
If the resource type is one of the following and has this annotation, an `Edit resource` button will appear in the GitOps UI, which allows users to edit the resource, after which it will be re-rendered:

- Applications:
  - `HelmRelease`
  - `Kustomization`
- Sources:
  - `HelmRepository`
  - `GitRepository`
- Clusters:
  - `GitopsCluster`

Example:
```yaml {10,14}
spec:
  resourcetemplates:
    - apiVersion: v1
      kind: ConfigMap
      metadata:
        name: my-configmap
      data:
        my-key: my-value
    - apiVersion: source.toolkit.fluxcd.io/v1beta1
      kind: HelmRepository
      metadata:
        # This annotation will add an `Edit resource` button in the UI for this resource
        annotations:
          templates.weave.works/create-request: ''
        name: nginx
        namespace: default
```

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/imgs/quickstart-templates-deployed.png b/website/versioned_docs/version-0.24.0/gitops-templates/imgs/quickstart-templates-deployed.png
new file mode 100644
index 0000000000..8cc86d6fc2
Binary files /dev/null and b/website/versioned_docs/version-0.24.0/gitops-templates/imgs/quickstart-templates-deployed.png differ
diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/imgs/quickstart-templates-view.png b/website/versioned_docs/version-0.24.0/gitops-templates/imgs/quickstart-templates-view.png
new file mode 100644
index 0000000000..f38d1bc413
Binary files /dev/null and b/website/versioned_docs/version-0.24.0/gitops-templates/imgs/quickstart-templates-view.png differ
diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/intro.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/intro.mdx
new file mode 100644
index 0000000000..56db055fb0
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/intro.mdx
@@ -0,0 +1,47 @@
---
title: Introduction
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Introduction

`GitOpsTemplate`s enable Application Developers to self-service components and services easily through the Weave GitOps Dashboard.

With a `GitOpsTemplate`, any resource that can be expressed in yaml (such as basic Kubernetes resources, Flux primitives, TF controller, Crossplane, Cluster API) can be templated into a standardised definition, available for use.

Templates are simple YAML files, which can be enriched with Parameters, Variables, Metadata and conditions.

When an Application Developer uses a Template via the GUI, the rendered template is added to their GitOps repository via a PR. When merged and reconciled, the resources in the template are created. These can be any resource, such as a `MachinePool` for CAPI objects, a Flux Kustomization or a Terraform Controller resource.

:::tip

The only restriction on `GitOpsTemplate`s is that they are valid `yaml`. Beyond that, a rendered template can create any resource desired :sparkles:.

:::

![quickstart templates view](imgs/quickstart-templates-view.png)

:::info

GitOpsTemplate or CAPITemplate?
The only difference between `CAPITemplate` and `GitOpsTemplate` is the default value of these two annotations:

| Annotation | default value for `CAPITemplate` | default value for `GitOpsTemplate` |
| ----------- | ---------------- | ------------------ |
| `templates.weave.works/add-common-bases` | `"true"` | `"false"` |
| `templates.weave.works/inject-prune-annotations` | `"true"` | `"false"` |

:::

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/params.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/params.mdx
new file mode 100644
index 0000000000..89f524fdb8
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/params.mdx
@@ -0,0 +1,50 @@
---
title: Parameters
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Parameters

When users have chosen a template, they will be presented with a form to complete.

This form will collect the specific resource configuration which they would like applied to their instance.

Resource variables, or parameters, are set by the template author in the template object manifest under `spec.params`.

## Required params

Some params are **required** for all resources, as they will be used to generate paths for the eventually rendered resources.

These are:
- `CLUSTER_NAME`
- `RESOURCE_NAME`

## Parameters metadata

The following metadata fields can be added for each parameter under `spec.params`. These will get rendered nicely in the UI form, allowing users to understand what each field is for.

- `name`: The variable name within the resource templates.
- `description`: Description of the parameter. This will be rendered in both the UI and CLI.
- `options`: The list of possible values this parameter can be set to.
- `required`: Whether the parameter must contain a non-empty value.
- `default`: Default value of the parameter.

Example:
```yaml
spec:
  params:
    - name: IP_ADDRESS
      description: 'The IP address of this service'
      options: [1.2.3.4, 5.6.7.8]
      default: 1.2.3.4
```

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/profiles.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/profiles.mdx
new file mode 100644
index 0000000000..77a4e6d06d
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/profiles.mdx
@@ -0,0 +1,109 @@
---
title: Profiles
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Adding Profiles to Templates

Profiles are enhanced Helm Charts which allow operators to make additional components either optional or required to developers using self-service templates.

Default and required profiles can be added via the template `spec.charts` section.

```yaml
spec:
  charts:
    items:
      - name: nginx
        version: 1.0.0
        targetNamespace: nginx
      - name: cert-manager
        targetNamespace: cert-manager
```

A template with the above profiles would offer Application Developers the option to add `nginx` and `cert-manager` resources to their templated resources, ready for deployment to their cluster.

## Profile Operator Settings

The following keys are available in the `spec.charts` section, along with the template variables available to each of them.
+ +| **Key** | **Description** | **Template vars** | +| ----------------------------- | -------------------------------------------- | ----------------- | +| `helmRepositoryTemplate.path` | Path the `HelmRepository` will be written to | `params` | +| `items` | list of charts to configure, see below | | + +Keys available in the `spec.charts.items` entries and the template variables available to them. + +| **Key** | **Description** | **Template vars** | +| ------------------ | ---------------------------------------------------------------------- | ----------------- | +| `template.content` | Full or partial `HelmRelease` CR template | `params` | +| `template.path` | Path the HelmRelease will be written to | `params` | +| `chart` | Shortcut to `HelmRelease.spec.chart.spec.chart` | | +| `version` | Shortcut to `HelmRelease.spec.chart.spec.version` | | +| `targetNamespace` | Shortcut to `HelmRelease.spec.targetNamespace` | | +| `values` | Shortcut to `HelmRelease.spec.values` | `params` | +| `layer` | Layer to install as | | +| `required` | (default=false) Allow the user to de-select this profile | +| `editable` | (default=false) Allow the user to edit the values.yaml of this profile | + +
Expand for a complete yaml example + +```yaml +spec: + charts: + helmRepositoryTemplate: + path: clusters/${CLUSTER_NAME}/helm-repositories.yaml + items: + - chart: cert-manager + version: v1.5.3 + editable: false + required: true + values: + installCRDs: ${CERT_MANAGER_INSTALL_CRDS} + targetNamespace: cert-manager + layer: layer-1 + template: + path: clusters/${CLUSTER_NAME}/cert-manager.yaml + content: + metadata: + labels: + app.kubernetes.io/name: cert-manager + spec: + retries: ${CERT_MANAGER_RETRY_COUNT} +``` + +:::tip + +`template.content` will be merged over the top of a default `HelmRelease` CR so it does not need to be complete. + +::: + +
## Declaring Profiles with Annotations

:::caution Deprecated feature

Where possible, please use the `spec.charts` section as detailed above to declare profiles.

:::

Profiles can also be included within templates by using the `capi.weave.works/profile-INDEX` annotation.

```yaml
annotations:
  capi.weave.works/profile-0: '{"name": "NAME", "version": "VERSION", "editable": EDITABLE, "namespace": "NAMESPACE"}'
```

Where:

- `name` - the name of the profile in the default profiles repository
- `version` - (optional) will choose the default version
- `namespace` - (optional) the default target namespace for the profile
- `editable` - (optional, default=`false`) allow the user to de-select this profile, making it a default instead of a requirement.

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/quickstart-templates.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/quickstart-templates.mdx
new file mode 100644
index 0000000000..5dfec04ce1
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/quickstart-templates.mdx
@@ -0,0 +1,107 @@
---
title: Quickstart
hide_title: true
---

import Link from "@docusaurus/Link";
import TierLabel from "../_components/TierLabel";

# Quickstart GitOps Templates

`Quickstart` templates are [`GitOpsTemplate`s](https://docs.gitops.weave.works/docs/gitops-templates/templates/) that you can use when getting started with Weave GitOps Enterprise. They aim to provide a simplified, basic experience.

## Getting Started

The templates exist as a Helm Chart in the [weave-gitops-quickstart](https://github.com/weaveworks/weave-gitops-quickstart) GitHub repo.

To get started, add the following `HelmRelease` object to your Weave GitOps Enterprise configuration repo for your management cluster.
Expand to view + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: weave-gitops-quickstart + namespace: flux-system +spec: + interval: 10m0s + ref: + branch: main + url: https://github.com/weaveworks/weave-gitops-quickstart +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: quickstart-templates + namespace: flux-system +spec: + chart: + spec: + chart: "quickstart-templates" + version: ">=0.1.0" + sourceRef: + kind: GitRepository + name: weave-gitops-quickstart + namespace: flux-system + interval: 10m0s +``` + +
Commit and merge the above file. Once the `HelmRelease` has been successfully deployed to your cluster, navigate to your Weave GitOps UI Dashboard. You will see that the `templates` Chart is now deployed to your cluster.

![quickstart templates deployed](imgs/quickstart-templates-deployed.png)

If you click on the `Templates` tab in the sidebar, you will see that the Quickstart templates are now available for use:

![quickstart templates view](imgs/quickstart-templates-view.png)

## Available Templates

The following [pipeline](../pipelines/pipeline-templates.mdx) templates have been made available on your Weave GitOps Enterprise instance:

- `pipeline-view`: A template to create a sample pipeline to visualize a `HelmRelease` application delivered to dev, test and prod environments.
- `pipeline-promotion-resources`: A template to create the Flux Notification Controller resources required for promoting applications via pipelines.
- `pipeline-view-promote-by-cluster`: A template to create pipelines for hard tenancy, when applications are isolated by cluster.
- `pipeline-view-promote-by-namespace`: A template to create pipelines for soft tenancy, when applications are isolated by namespace.

## Using `GitOpsTemplate`s as a Platform Engineer

The above Quickstart templates are designed to provide a practical getting-started experience. We encourage Platform Operators to start off with these templates within their team to ramp up on using Weave GitOps.

If the need arises later, operators can always expand on these templates to develop their own set of self-service capabilities.

## Using `GitOpsTemplate`s as an Application Developer

As a developer using Weave GitOps Enterprise, use the templates to explore the capabilities of GitOps. For example, to create a pipeline for your application, use the above templates provided by your Operations team to create the required resources. Once they have been added to your GitOps repository, you can adapt the rendered resources to meet your needs.

:::tip Want to contribute?

The Quickstart templates are maintained by the Weave GitOps team. If you would like to make alterations, suggest fixes, or even contribute a new template which you find cool, just head to the [repo](https://github.com/weaveworks/weave-gitops-quickstart) and open a new issue or PR!

:::

diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/repo-rendered-paths.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/repo-rendered-paths.mdx
new file mode 100644
index 0000000000..cb3f3d7866
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/gitops-templates/repo-rendered-paths.mdx
@@ -0,0 +1,121 @@
---
title: Rendered Template Paths
hide_title: true
---

import TierLabel from "../_components/TierLabel";

# Rendered Template Paths

Template authors can configure the eventual location of the rendered template in the user's GitOps repository.

This allows for more control over where different resources in the template are rendered.

## Configuring Paths

The path for rendered resources is configured via the `spec.resourcetemplates[].path` field.

:::tip Important to note:
- The path is relative to the repository root
- The path can be templated using params
:::
Expand to see example + +```yaml +spec: + resourcetemplates: + // highlight-next-line + - path: clusters/${CLUSTER_NAME}/definition/cluster.yaml + content: + - apiVersion: cluster.x-k8s.io/v1alpha4 + kind: Cluster + metadata: + name: ${CLUSTER_NAME} + ... + - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AWSCluster + metadata: + name: ${CLUSTER_NAME} + ... + // highlight-next-line + - path: clusters/${CLUSTER_NAME}/workloads/helmreleases.yaml + content: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: ${CLUSTER_NAME}-nginx + ... + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: ${CLUSTER_NAME}-cert-manager + ... +``` + +
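To make the effect concrete: with `CLUSTER_NAME` set to, say, `dev-cluster` (a hypothetical value), the two `path` fields above would produce files at the following locations in the GitOps repository:

```
clusters/dev-cluster/definition/cluster.yaml
clusters/dev-cluster/workloads/helmreleases.yaml
```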
+ +### Configuring paths for `charts` + +The `spec.charts.helmRepositoryTemplate.path` and `spec.charts.items[].template.path` fields can be used to specify the paths of these resources: + +Example + +```yaml +spec: + charts: + helmRepositoryTemplate: + // highlight-next-line + path: clusters/${CLUSTER_NAME}/workloads/helm-repo.yaml + items: + - chart: cert-manager + version: 0.0.8 + template: + // highlight-next-line + path: clusters/${CLUSTER_NAME}/workloads/cert-manager.yaml +``` + + +## Default Paths + +If the `spec.resourcetemplates[].path` is omitted, a default path for the +rendered template is calculated. + +In this case some of the submitted params are used. Users **must** provide one of the following parameters: +- `CLUSTER_NAME` +- `RESOURCE_NAME` + +To ensure users supply these values, set the parameters to `required` in the the +template definition: + +```yaml +spec: + params: + - name: RESOURCE_NAME + required: true + # or + - name: CLUSTER_NAME + required: true +``` + +:::caution Important + +The **kustomization** feature and the `add-common-bases` annotation feature **always** use a calculated default path. +If you are using these features one of `CLUSTER_NAME` or `RESOURCE_NAME` +**must** be provided, even if you specify a `path` for all the other resources in the template. + +::: + +The default path for a template has a few components: +- From the params: `CLUSTER_NAME` or `RESOURCE_NAME`, **required**. +- From the params: `NAMESPACE`, default: `default` +- From `values.yaml` for the Weave GitOps Enterprise `mccp` chart: `values.config.capi.repositoryPath`, default: `clusters/management/clusters` + +These are composed to create the path: +`${repositoryPath}/${NAMESPACE}/${CLUSTER_OR_RESOURCE_NAME}.yaml` + +Using the default values and supplying `CLUSTER_NAME` as `my-cluster` will result in the path: +`clusters/management/clusters/default/my-cluster.yaml` + diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/resource-templates.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/resource-templates.mdx new file mode 100644 index 0000000000..aae7548ee5 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitops-templates/resource-templates.mdx @@ -0,0 +1,63 @@ +--- +title: Resource Templates +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Resource templates + +Resource templates are used to create Kubernetes resources. They are defined in the `spec.resourcetemplates` section of the template. + +### The `content` key + +The `content` key is used to define a list of resources: + +```yaml +spec: + resourcetemplates: + - content: + - apiVersion: v1 + kind: Namespace + metadata: + name: nginx + - apiVersion: v1 + kind: Namespace + metadata: + name: cert-manager +``` + +### The `raw` key + +The `raw` key is used to define a raw string that will written to the specified path. + +This can be useful to preserve comments or formatting in the rendered resource. + +```yaml +spec: + resourcetemplates: + - path: "helm-release.yaml" + raw: | + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: podinfo + namespace: prod-github + spec: + interval: 1m + chart: + spec: + chart: podinfo + version: "6.0.0" # {"$promotion": "flux-system:podinfo-github:prod"} + sourceRef: + kind: HelmRepository + name: podinfo + interval: 1m +``` + +:::info + +- The `raw` key is not compatible with the `content` key. Only one of the two can be used. 
+- The `raw` key data must still be a valid kubernetes unstructured object. + +::: diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/supported-langs.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/supported-langs.mdx new file mode 100644 index 0000000000..bc61865948 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitops-templates/supported-langs.mdx @@ -0,0 +1,93 @@ +--- +title: Supported Templating Languages +hide_title: true +--- +import TierLabel from "../_components/TierLabel"; + +# Supported Templating Languages + +The following templating languages are supported: +- envsubst (default) +- templating + +Declare the templating language to be used to render the template by setting `spec.renderType`. + +## Envsubst + +`envsubst`, which is short for 'environment substitution', uses [envsubst](https://github.com/a8m/envsubst) +for rendering. +This templating format is used by [clusterctl](https://cluster-api.sigs.k8s.io/clusterctl/overview.html). + +Variables can be set for rendering into the template in the `${VAR_NAME}` +syntax. + +### Supported Functions + +| __Expression__ | __Meaning__ | +| ----------------- | -------------- | +| `${var}` | Value of `$var` +| `${#var}` | String length of `$var` +| `${var^}` | Uppercase first character of `$var` +| `${var^^}` | Uppercase all characters in `$var` +| `${var,}` | Lowercase first character of `$var` +| `${var,,}` | Lowercase all characters in `$var` +| `${var:n}` | Offset `$var` `n` characters from start +| `${var:n:len}` | Offset `$var` `n` characters with max length of `len` +| `${var#pattern}` | Strip shortest `pattern` match from start +| `${var##pattern}` | Strip longest `pattern` match from start +| `${var%pattern}` | Strip shortest `pattern` match from end +| `${var%%pattern}` | Strip longest `pattern` match from end +| `${var-default}` | If `$var` is not set, evaluate expression as `$default` +| `${var:-default}` | If `$var` is not set or is empty, evaluate expression as `$default` +| `${var=default}` | If `$var` is not set, evaluate expression as `$default` +| `${var:=default}` | If `$var` is not set or is empty, evaluate expression as `$default` +| `${var/pattern/replacement}` | Replace as few `pattern` matches as possible with `replacement` +| `${var//pattern/replacement}` | Replace as many `pattern` matches as possible with `replacement` +| `${var/#pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` start +| `${var/%pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` end + +## Templating + +Templating uses text/templating for rendering, using go-templating style syntax `{{ .params.CLUSTER_NAME }}` +where params are provided by the `.params` variable. +Template functions can also be used with the syntax `{{ .params.CLUSTER_NAME | FUNCTION }}`. 
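For instance, a small sketch (using a hypothetical `NAMESPACE` parameter) that pipes a value through the `default` function listed in the table below:

```yaml
metadata:
  name: "{{ .params.CLUSTER_NAME }}"
  namespace: '{{ .params.NAMESPACE | default "default" }}'
```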
+ +### Supported Functions + +As taken (from the [Sprig library](http://masterminds.github.io/sprig/)) + +| __Function Type__ | __Functions__ | +| ----------------- | -------------- | +| String Functions | *trim*, *wrap*, *randAlpha*, *plural* +| String List Functions | *splitList*, *sortAlpha* +| Integer Math Functions | *add*, *max*, *mul* +| Integer Slice Functions | *until*, untilStep +| Float Math Functions | *addf*, *maxf*, *mulf* +| Date Functions | *now*, *date* +| Defaults Functions | *default*, *empty*, *coalesce*, *fromJson*, *toJson*, *toPrettyJson*, *toRawJson*, ternary +| Encoding Functions | *b64enc*, *b64dec* +| Lists and List Functions | *list*, *first*, *uniq* +| Dictionaries and Dict Functions | *get*, *set*, *dict*, *hasKey*, *pluck*, *dig*, *deepCopy* +| Type Conversion Functions | *atoi*, *int64*, *toString* +| Flow Control Functions | *fail* +| UUID Functions | *uuidv4* +| Version Comparison Functions | *semver*, semverCompare +| Reflection | *typeOf*, *kindIs*, *typeIsLike* + +### Custom Delimiters + +The default delimiters for `renderType: templating` are `{{` and `}}`. +These can be changed by setting the `templates.weave.works/delimiters` annotation +on the template. For example: + +- `templates.weave.works/delimiters: "{{,}}"` - default +- `templates.weave.works/delimiters: "${{,}}"` + - Use `${{` and `}}`, for example `"${{ .params.CLUSTER_NAME }}"` + - Useful as `{{` in yaml is invalid syntax and needs to be quoted. If you need to provide a un-quoted number value like `replicas: 3` you should use these delimiters. + - :x: `replicas: {{ .params.REPLICAS }}` Invalid yaml + - :x: `replicas: "{{ .params.REPLICAS }}"` Valid yaml, incorrect type. The type is a `string` not a `number` and will fail validation. + - :white_check_mark: `replicas: ${{ .params.REPLICAS }}` Valid yaml and correct `number` type. +- `templates.weave.works/delimiters: "<<,>>" ` + - Use `<<` and `>>`, for example `<< .params.CLUSTER_NAME >>` + - Useful if you are nesting templates and need to differentiate between the delimiters used in the inner and outer templates. + diff --git a/website/versioned_docs/version-0.24.0/gitops-templates/versions.mdx b/website/versioned_docs/version-0.24.0/gitops-templates/versions.mdx new file mode 100644 index 0000000000..7d123925b2 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitops-templates/versions.mdx @@ -0,0 +1,67 @@ +--- +title: Version Information +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Version Information + +There are now multiple published versions of the template CRD. + +## Migration notes + +### `v1alpha1` to `v1alpha2` + +When manually migrating a template from `v1alpha1` to `v1alpha2` (for example in git) you will need to: +1. Update the `apiVersion` to `templates.weave.works/v1alpha2` +1. Move the `spec.resourcetemplates` field to `spec.resourcetemplates[0].contents` +1. Either leave the `spec.resourcetemplates[0].path` field empty or give it a sensible value. + +If you experience issues with the path not being recognised when Flux reconciles +the new template versions, try manually applying the new template to the cluster directly with: +1. Run `kubectl apply -f capi-template.yaml` +1. Run `flux reconcile kustomization --with-source flux-system` **twice**. + +## Conversion Webhook + +A conversion webhook is hosted by the `flux-system/templates-controller-webhook-service` service. +`v1alpha1` templates are automatically converted to `v1alpha2` when they are loaded into the cluster. 
+ +### v1alpha1 to v1alpha2 conversion + +The `spec.resourcetemplates` field is moved to `spec.resourcetemplates[0].contents` and the `spec.resourcetemplates[0].path` is left empty. +When the tempalte is rendered the `spec.resourcetemplates[0].path` field has a default value calculated. + +## `v1alpha2` (default) notes + +This version changes the type of `spec.resourcetemplates` from a list of objects to a list of files with a `path` and `contents`: + +Example: +```yaml +spec: + resourcetemplates: + - path: "clusters/{{ .params.CLUSTER_NAME }}.yaml" + contents: + - apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "{{ .params.CLUSTER_NAME }}" + path: "clusters/{{ .params.CLUSTER_NAME }}.yaml" +``` + +## `v1alpha1` notes + +The original version of the template. This version is deprecated and will be removed in a future release. + +It uses `spec.resourcetemplates` as a list of resources to render. + +Example: +```yaml +spec: + resourcetemplates: + - apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "{{ .params.CLUSTER_NAME }}" +``` diff --git a/website/versioned_docs/version-0.24.0/gitopssets/_api-toc.json b/website/versioned_docs/version-0.24.0/gitopssets/_api-toc.json new file mode 100644 index 0000000000..2a03f7c8fd --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/_api-toc.json @@ -0,0 +1,35 @@ +[ +{ "level": 3, "value": "GitOpsSet", "id": "templates.weave.works/v1alpha1.GitOpsSet" } +, +{ "level": 3, "value": "APIClientGenerator", "id": "templates.weave.works/v1alpha1.APIClientGenerator" } +, +{ "level": 3, "value": "ClusterGenerator", "id": "templates.weave.works/v1alpha1.ClusterGenerator" } +, +{ "level": 3, "value": "GitOpsSetGenerator", "id": "templates.weave.works/v1alpha1.GitOpsSetGenerator" } +, +{ "level": 3, "value": "GitOpsSetNestedGenerator", "id": "templates.weave.works/v1alpha1.GitOpsSetNestedGenerator" } +, +{ "level": 3, "value": "GitOpsSetSpec", "id": "templates.weave.works/v1alpha1.GitOpsSetSpec" } +, +{ "level": 3, "value": "GitOpsSetStatus", "id": "templates.weave.works/v1alpha1.GitOpsSetStatus" } +, +{ "level": 3, "value": "GitOpsSetTemplate", "id": "templates.weave.works/v1alpha1.GitOpsSetTemplate" } +, +{ "level": 3, "value": "GitRepositoryGenerator", "id": "templates.weave.works/v1alpha1.GitRepositoryGenerator" } +, +{ "level": 3, "value": "GitRepositoryGeneratorDirectoryItem", "id": "templates.weave.works/v1alpha1.GitRepositoryGeneratorDirectoryItem" } +, +{ "level": 3, "value": "GitRepositoryGeneratorFileItem", "id": "templates.weave.works/v1alpha1.GitRepositoryGeneratorFileItem" } +, +{ "level": 3, "value": "HeadersReference", "id": "templates.weave.works/v1alpha1.HeadersReference" } +, +{ "level": 3, "value": "ListGenerator", "id": "templates.weave.works/v1alpha1.ListGenerator" } +, +{ "level": 3, "value": "MatrixGenerator", "id": "templates.weave.works/v1alpha1.MatrixGenerator" } +, +{ "level": 3, "value": "PullRequestGenerator", "id": "templates.weave.works/v1alpha1.PullRequestGenerator" } +, +{ "level": 3, "value": "ResourceInventory", "id": "templates.weave.works/v1alpha1.ResourceInventory" } +, +{ "level": 3, "value": "ResourceRef", "id": "templates.weave.works/v1alpha1.ResourceRef" } +] diff --git a/website/versioned_docs/version-0.24.0/gitopssets/_api.mdx b/website/versioned_docs/version-0.24.0/gitopssets/_api.mdx new file mode 100644 index 0000000000..9c646b09b8 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/_api.mdx @@ -0,0 +1,1047 @@ +

Packages:

+ +

templates.weave.works/v1alpha1

+

Package v1alpha1 contains API Schema definitions for the gitopssets v1alpha1 API group

+Resource Types: + +

GitOpsSet +

+

GitOpsSet is the Schema for the gitopssets API

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+templates.weave.works/v1alpha1 +
+kind
+string +
+GitOpsSet +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +GitOpsSetSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitOpsSet.

+
+generators
+ + +[]GitOpsSetGenerator + + +
+

Generators generate the data to be inserted into the provided templates.

+
+templates
+ + +[]GitOpsSetTemplate + + +
+

Templates are a set of YAML templates that are rendered into resources +from the data supplied by the generators.

+
+serviceAccountName
+ +string + +
+(Optional) +

The name of the Kubernetes service account to impersonate +when reconciling this Kustomization.

+
+
+status
+ + +GitOpsSetStatus + + +
+
+

APIClientGenerator +

+

+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +

+

APIClientGenerator defines a generator that queries an API endpoint and uses +that to generate data.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

The interval at which to poll the API endpoint.

+
+endpoint
+ +string + +
+(Optional) +

This is the API endpoint to use.

+
+method
+ +string + +
+

Method defines the HTTP method to use to talk to the endpoint.

+
+jsonPath
+ +string + +
+

JSONPath is string that is used to modify the result of the API +call.

+

This can be used to extract a repeating element from a response. +https://kubernetes.io/docs/reference/kubectl/jsonpath/

+
+headersRef
+ + +HeadersReference + + +
+(Optional) +

HeadersRef allows optional configuration of a Secret or ConfigMap to add +additional headers to an outgoing request.

+

For example, a Secret with a key Authorization: Bearer abc123 could be +used to configure an authorization header.

+
+body
+ + +Kubernetes pkg/apis/apiextensions/v1.JSON + + +
+(Optional) +

Body is set as the body in a POST request.

+

If set, this will configure the Method to be POST automatically.

+
+singleElement
+ +bool + +
+(Optional) +

SingleElement means generate a single element with the result of the API +call.

+

When true, the response must be a JSON object and will be returned as a +single element, i.e. only one element will be generated containing the +entire object.

+
+

ClusterGenerator +

+

+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +

+

ClusterGenerator defines a generator that queries the cluster API for +relevant clusters.

+ + + + + + + + + + + + + +
FieldDescription
+selector
+ + +Kubernetes meta/v1.LabelSelector + + +
+(Optional) +

Selector is used to filter the clusters that you want to target.

+

If no selector is provided, no clusters will be matched.

+
+

GitOpsSetGenerator +

+

+(Appears on: +GitOpsSetSpec) +

+

GitOpsSetGenerator is the top-level set of generators for this GitOpsSet.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+list
+ + +ListGenerator + + +
+
+pullRequests
+ + +PullRequestGenerator + + +
+
+gitRepository
+ + +GitRepositoryGenerator + + +
+
+matrix
+ + +MatrixGenerator + + +
+
+cluster
+ + +ClusterGenerator + + +
+
+apiClient
+ + +APIClientGenerator + + +
+
+

GitOpsSetNestedGenerator +

+

+(Appears on: +MatrixGenerator) +

+

GitOpsSetNestedGenerator describes the generators usable by the MatrixGenerator. +This is a subset of the generators allowed by the GitOpsSetGenerator because the CRD format doesn’t support recursive declarations.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+list
+ + +ListGenerator + + +
+
+gitRepository
+ + +GitRepositoryGenerator + + +
+
+pullRequests
+ + +PullRequestGenerator + + +
+
+cluster
+ + +ClusterGenerator + + +
+
+apiClient
+ + +APIClientGenerator + + +
+
+

GitOpsSetSpec +

+

+(Appears on: +GitOpsSet) +

+

GitOpsSetSpec defines the desired state of GitOpsSet

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitOpsSet.

+
+generators
+ + +[]GitOpsSetGenerator + + +
+

Generators generate the data to be inserted into the provided templates.

+
+templates
+ + +[]GitOpsSetTemplate + + +
+

Templates are a set of YAML templates that are rendered into resources +from the data supplied by the generators.

+
+serviceAccountName
+ +string + +
+(Optional) +

The name of the Kubernetes service account to impersonate +when reconciling this Kustomization.

+
+

GitOpsSetStatus +

+

+(Appears on: +GitOpsSet) +

+

GitOpsSetStatus defines the observed state of GitOpsSet

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the GitOpsSet +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the GitOpsSet

+
+inventory
+ + +ResourceInventory + + +
+(Optional) +

Inventory contains the list of Kubernetes resource object references that +have been successfully applied

+
+

GitOpsSetTemplate +

+

+(Appears on: +GitOpsSetSpec) +

+

GitOpsSetTemplate describes a resource to create

+ + + + + + + + + + + + + + + + + +
FieldDescription
+repeat
+ +string + +
+

Repeat is a JSONPath string defining that the template content should be +repeated for each of the matching elements in the JSONPath expression. +https://kubernetes.io/docs/reference/kubectl/jsonpath/

+
+content
+ + +k8s.io/apimachinery/pkg/runtime.RawExtension + + +
+

Content is the YAML to be templated and generated.

+
+
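For example, a template fragment that uses `repeat` to render once per entry of a nested `teams` list (as in the guide's repeat example) could look like this:

```yaml
templates:
  - repeat: "{ .teams }"
    content:
      kind: ConfigMap
      apiVersion: v1
      metadata:
        # .Repeat is the current entry of the repeated list
        name: "{{ .Repeat.name }}-demo"
      data:
        # .Element is still the whole generated element
        team: "{{ .Element.team }}"
```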

GitRepositoryGenerator +

+

+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +

+

GitRepositoryGenerator generates from files in a Flux GitRepository resource.

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+repositoryRef
+ +string + +
+

RepositoryRef is the name of a GitRepository resource to be generated from.

+
+files
+ + +[]GitRepositoryGeneratorFileItem + + +
+

Files is a set of rules for identifying files to be parsed.

+
+directories
+ + +[]GitRepositoryGeneratorDirectoryItem + + +
+

Directories is a set of rules for identifying directories to be +generated.

+
+
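For example, a `gitRepository` generator fragment that scans subdirectories and excludes one of them, mirroring the guide's example:

```yaml
spec:
  generators:
    - gitRepository:
        repositoryRef: go-demo-repo
        directories:
          - path: examples/kustomize/environments/*
          # generate from every environment except production
          - path: examples/kustomize/environments/production
            exclude: true
```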

GitRepositoryGeneratorDirectoryItem +

+

+(Appears on: +GitRepositoryGenerator) +

+

GitRepositoryGeneratorDirectoryItem stores the information about a specific +directory to be generated from.

+ + + + + + + + + + + + + + + + + +
FieldDescription
+path
+ +string + +
+
+exclude
+ +bool + +
+
+

GitRepositoryGeneratorFileItem +

+

+(Appears on: +GitRepositoryGenerator) +

+

GitRepositoryGeneratorFileItem defines a path to a file to be parsed when generating.

+ + + + + + + + + + + + + +
FieldDescription
+path
+ +string + +
+

Path is the name of a file to read and generate from; it can be JSON or YAML.

+
+

HeadersReference +

+

+(Appears on: +APIClientGenerator) +

+

HeadersReference references either a Secret or ConfigMap to be used for +additional request headers.

+ + + + + + + + + + + + + + + + + +
FieldDescription
+kind
+ +string + +
+

The resource kind to get headers from.

+
+name
+ +string + +
+

Name of the resource in the same namespace to apply headers from.

+
+
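For example, a Secret carrying an authorization header could look like the sketch below (the name and token are placeholders); the generator would then reference it with `headersRef: {kind: Secret, name: api-headers}`:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: api-headers
  namespace: default
type: Opaque
stringData:
  # each key/value pair becomes a request header
  Authorization: Bearer abc123
```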

ListGenerator +

+

+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +

+

ListGenerator generates from a hard-coded list.

+ + + + + + + + + + + + + +
FieldDescription
+elements
+ + +[]Kubernetes pkg/apis/apiextensions/v1.JSON + + +
+
+

MatrixGenerator +

+

+(Appears on: +GitOpsSetGenerator) +

+

MatrixGenerator defines a matrix that combines generators. +The matrix is a Cartesian product of the generators.

+ + + + + + + + + + + + + +
FieldDescription
+generators
+ + +[]GitOpsSetNestedGenerator + + +
+

Generators is a list of generators to be combined.

+
+
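As a sketch, a `matrix` generator fragment that combines a `gitRepository` generator with a `list` generator, as in the guide's matrix example:

```yaml
spec:
  generators:
    - matrix:
        generators:
          - gitRepository:
              repositoryRef: go-demo-repo
              files:
                - path: examples/generation/dev.yaml
          - list:
              elements:
                - cluster: dev-cluster
                  version: 1.0.0
```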

PullRequestGenerator +

+

+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +

+

PullRequestGenerator defines a generator that queries a Git hosting service +for relevant PRs.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

The interval at which to check for repository updates.

+
+driver
+ +string + +
+

Determines which git-api protocol to use.

+
+serverURL
+ +string + +
+(Optional) +

This is the API endpoint to use.

+
+repo
+ +string + +
+

This should be the Repo you want to query. +e.g. my-org/my-repo

+
+secretRef
+ + +Kubernetes core/v1.LocalObjectReference + + +
+

Reference to a Secret in the same namespace with a field “password” containing an +auth token that can query the Git Provider API.

+
+labels
+ +[]string + +
+(Optional) +

Labels is used to filter the PRs that you want to target. +This may be applied on the server.

+
+forks
+ +bool + +
+(Optional) +

Forks is used to filter out forks from the target PRs if false, +or to include forks if true.

+
+
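A sketch of a `pullRequests` generator fragment using these fields; the repository and Secret name are placeholders, and the label filter is optional:

```yaml
spec:
  generators:
    - pullRequests:
        interval: 5m
        driver: github
        repo: my-org/my-repo
        # Secret with a "password" field holding a read-only API token
        secretRef:
          name: github-secret
        # exclude PRs coming from forks
        forks: false
        # only PRs with this label are generated from
        labels:
          - deploy
```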

ResourceInventory +

+

+(Appears on: +GitOpsSetStatus) +

+

ResourceInventory contains a list of Kubernetes resource object references that have been applied by a GitOpsSet.

+ + + + + + + + + + + + + +
FieldDescription
+entries
+ + +[]ResourceRef + + +
+

Entries of Kubernetes resource object references.

+
+

ResourceRef +

+

+(Appears on: +ResourceInventory) +

+

ResourceRef contains the information necessary to locate a resource within a cluster.

+ + + + + + + + + + + + + + + + + +
FieldDescription
+id
+ +string + +
+

ID is the string representation of the Kubernetes resource object’s metadata, +in the format ‘namespace_name_group_kind’.

+
+v
+ +string + +
+

Version is the API version of the Kubernetes resource object’s kind.

+
+
+

This page was automatically generated with gen-crd-api-reference-docs

+
diff --git a/website/versioned_docs/version-0.24.0/gitopssets/api-reference.mdx b/website/versioned_docs/version-0.24.0/gitopssets/api-reference.mdx new file mode 100644 index 0000000000..94c3b31de6 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/api-reference.mdx @@ -0,0 +1,10 @@ +--- +title: API reference +hide_title: true +--- + +import GeneratedAPI from './_api.mdx'; +import apiToc from './_api-toc.json'; +export const toc = apiToc; + + diff --git a/website/versioned_docs/version-0.24.0/gitopssets/guide.mdx b/website/versioned_docs/version-0.24.0/gitopssets/guide.mdx new file mode 100644 index 0000000000..8a4ce16427 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/guide.mdx @@ -0,0 +1,820 @@ +--- +title: Guide +hide_title: true +--- + +# Guide + +## Basics + +Currently rendering templates operates in two phases: + +- Generate all template parameters from the configured generators +- Render all the templates for each set of template parameters + +Please read the [security information](#security) below before using this. + +## Generation + +The simplest generator is the `List` generator. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: gitopsset-sample +spec: + generators: + - list: + elements: + - env: dev + team: dev-team + - env: production + team: ops-team + - env: staging + team: ops-team +``` + +The elements in there are a set JSON of objects[^yaml], there are three in this example, and each of them has two keys, `env` and `team`. + +Other generators provide different sets of keys and values. + +The [generators](#generators) documentation below provides more information on what the other generators output. + +## Rendering templates + +Templates are Kubernetes resources in YAML format. + +Each template is rendered for each element generated by the generators. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: gitopsset-sample +spec: + generators: + - list: + elements: + - env: dev + team: dev-team + - env: production + team: ops-team + - env: staging + team: ops-team + templates: + - content: + kind: Kustomization + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + metadata: + name: "{{ .Element.env }}-demo" + labels: + app.kubernetes.io/name: go-demo + app.kubernetes.io/instance: "{{ .Element.env }}" + com.example/team: "{{ .Element.team }}" + spec: + interval: 5m + path: "./examples/kustomize/environments/{{ .Element.env }}" + prune: true + sourceRef: + kind: GitRepository + name: go-demo-repo +``` + +The generated elements are provided to the template in the `Element` scope, so +`.Element.dev` refers to the `dev` field from the List element. + +The output from all generators is exposed in the `Element` scope, not just List +generators. + +## Repeating templates + +The output from a generator is an array of JSON objects[^yaml], the keys of which can contain repeating elements, either further JSON objects, or scalar values. + +It can be desirable to repeat a template for a repeated element in a generated +value. 
+ +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: repeated-gitopsset-sample +spec: + generators: + - list: + elements: + - env: dev + team: dev-team + teams: + - name: "team1" + - name: "team2" + - name: "team3" + - env: staging + team: staging-team + teams: + - name: "team4" + - name: "team5" + - name: "team6" + templates: + - repeat: "{ .teams }" + content: + kind: ConfigMap + apiVersion: v1 + metadata: + name: "{{ .Repeat.name }}-demo" + data: + name: "{{ .Repeat.name }}-demo" + team: "{{ .Element.team }}" +``` + +The template `repeat` field is a [JSONPath](https://kubernetes.io/docs/reference/kubectl/jsonpath/) expression that is applied to each element during the template rendering. + +Templates that use `repeat` will have two separate scopes for the template params, `.Element` which is the top-level element generated by the generator, and the additional `.Repeat` scope, which is the repeating element. + +In this case, six different `ConfigMaps` are generated, three for the "dev-team" and three for the "staging-team". + +## Generators + +We currently provide these generators: + - [list](#list-generator) + - [pullRequests](#pullrequests-generator) + - [gitRepository](#gitrepository-generator) + - [matrix](#matrix-generator) + - [apiClient](#apiclient-generator) + - [cluster](#cluster-generator) + +### List generator + +This is the simplest generator, which is a hard-coded array of JSON objects, described as YAML mappings. + +### GitRepository generator + +The `GitRepository` generator operates on [Flux GitRepositories](https://fluxcd.io/flux/components/source/gitrepositories/). + +When a `GitRepository` is updated, this will trigger a regeneration of templates. + +The generator operates in two different ways, you can parse files (YAML or JSON) into Elements, or you can scan directories for subdirectories. + +#### Generation from files + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: repository-sample +spec: + generators: + - gitRepository: + repositoryRef: go-demo-repo + files: + - path: examples/generation/dev.yaml + - path: examples/generation/production.yaml + - path: examples/generation/staging.yaml + templates: + - content: + kind: Kustomization + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + metadata: + name: "{{ .Element.env }}-demo" + labels: + app.kubernetes.io/name: go-demo + app.kubernetes.io/instance: "{{ .Element.env }}" + com.example/team: "{{ .Element.team }}" + spec: + interval: 5m + path: "./examples/kustomize/environments/{{ .Element.env }}" + prune: true + sourceRef: + kind: GitRepository + name: go-demo-repo +``` + +In this example, a [Flux `GitRepository`](https://fluxcd.io/flux/components/source/gitrepositories/) called `go-demo-repo` in the same namespace as the `GitOpsSet` will be tracked, and `Kustomization` resources will be generated from the three files listed. + +These files can be JSON or YAML. + +In this example we expect to find the following structure in the files: + +```yaml +env: dev +team: developers +``` + +Changes pushed to the `GitRepository` will result in rereconciliation of the templates into the cluster. + +For security reasons, you need to explicitly list out the files that the generator should parse. 
+ +#### Generation from directories + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + labels: + app.kubernetes.io/name: gitopsset + app.kubernetes.io/instance: gitopsset-sample + app.kubernetes.io/part-of: gitopssets-controller + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: gitopssets-controller + name: repository-sample +spec: + generators: + - gitRepository: + repositoryRef: go-demo-repo + directories: + - path: examples/kustomize/environments/* + templates: + - content: + kind: Kustomization + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + metadata: + name: "{{ .Element.Base }}-demo" + labels: + app.kubernetes.io/name: go-demo + app.kubernetes.io/instance: "{{ .Element.Base }}" + com.example/team: "{{ .Element.Base }}" + spec: + interval: 5m + path: "{{ .Element.Directory }}" + prune: true + sourceRef: + kind: GitRepository + name: go-demo-repo +``` +In this example, a [Flux `GitRepository`](https://fluxcd.io/flux/components/source/gitrepositories/) called `go-demo-repo` in the same namespace as the `GitOpsSet` will be tracked, and `Kustomization` resources are generated from paths within the `examples/kustomize/environments/*` directory within the repository. + +Each generated element has two keys, `.Element.Directory` which will be a repo-relative path and `.Element.Base` which contains the last element of the path, for example, for a directory `./examples/kustomize/environments/production` this will be `production`. + +It is also possible to exclude paths from the generated list, for example, if you do not want to generate for a directory you can exclude it with: +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: repository-sample +spec: + generators: + - gitRepository: + repositoryRef: go-demo-repo + directories: + - path: examples/kustomize/environments/* + - path: examples/kustomize/environments/production + exclude: true + templates: + - content: +``` +In this case, all directories that are subdirectories of `examples/kustomize/environments` will be generated, **but** not `examples/kustomize/environments/production`. + +**Note**: The directory tree detection is restricted to the same directory as the path, no recursion is done. + +In fact the path is treated as a [Glob](https://pkg.go.dev/path/filepath#Glob). + +### PullRequests generator + +This will require to make authenticated requests to your Git hosting provider e.g. GitHub, GitLab, Bitbucket etc. + +It does only require read-only access, but all API tokens should be guarded as carefully as possible, what is a "read-only" token today, might become a token with higher-privilege in the future. 
+ +_There have been many security compromises using API access tokens, do not let this happen to you!_ + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: pull-requests-sample +spec: + generators: + - pullRequests: + interval: 5m + driver: github + repo: bigkevmcd/go-demo + secretRef: + name: github-secret + templates: + - content: + apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: GitRepository + metadata: + name: "pr-{{ .Element.Number }}-gitrepository" + namespace: default + spec: + interval: 5m0s + url: "{{ .Element.CloneURL }}" + ref: + branch: "{{ .Element.Branch }}" + - content: + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: "pr-{{ .Element.Number }}-demo" + namespace: default + spec: + interval: 5m + path: "./examples/kustomize/environments/dev" + prune: true + targetNamespace: "{{ .Element.Branch }}-ns" + sourceRef: + kind: GitRepository + name: "pr-{{ .Element.Number }}-gitrepository" +``` + +This example will poll "github.com/bigkevmcd/go-demo" for open pull requests and trigger the deployment of these by creating a Flux `GitRepository` and a `Kustomization` to deploy. + +As the generator only queries open pull requests, when a PR is closed, the generated resources will be removed. + +For non-public installations, you can configure the `serverURL` field and point it to your own installation. + +The `driver` field can be `github` or `gitlab` or `bitbucketserver`, other options can be supported from [go-scm](https://github.com/jenkins-x/go-scm/blob/main/scm/factory/factory.go). + +The `forks` flag field can be used to indicate whether to include forks in the target pull requests or not. If set to `true` any pull request from a fork repository will be included, otherwise if `false` or not indicated the pull requests from fork repositories are discarded. + +Additionally labels can be provided for querying pull requests with matching labels e.g. + +```yaml +- pullRequests: + interval: 5m + driver: github + repo: bigkevmcd/go-demo + secretRef: + name: github-secret + forks: false + labels: + - deploy +``` + +The fields emitted by the pull-request are as follows: + +- `Number` this is generated as a string representation +- `Branch` this is the source branch +- `HeadSHA` this is the SHA of the commit in the merge branch +- `CloneURL` this is the HTTPS clone URL for this repository +- `CloneSSHURL` this is the SSH clone URL for this repository +- `Fork` this indicates whether the pull request is from a fork (true) or not (false) + +Create a read-only token that can list Pull Requests, and store it in a secret: + +```shell +$ kubectl create secret generic github-secret \ + --from-literal password= +``` + +### Matrix generator + +The matrix generator doesn't generate resources by itself. 
It combines the results of +generation from other generators e.g.: + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: matrix-sample +spec: + generators: + - matrix: + generators: + - gitRepository: + repositoryRef: go-demo-repo + files: + - path: examples/generation/dev.yaml + - path: examples/generation/production.yaml + - path: examples/generation/staging.yaml + - list: + elements: + - cluster: dev-cluster + version: 1.0.0 +``` + +Given the files mentioned all have the following structure: + +```yaml +env: dev +team: developers +``` + +This will result in three sets of generated parameters, which are a combination of the maps in the files in the gitRepository, and the elements in the list generator, this can result in a combinatorial explosion of resources being created in your cluster. + +```yaml +- env: dev + team: developers + cluster: dev-cluster + version: 1.0.0 +- env: staging + team: staging-team + cluster: dev-cluster + version: 1.0.0 +- env: production + team: production-team + cluster: dev-cluster + version: 1.0.0 +``` + +These can be referenced in the templates, note that all keys in the merged generators from the Matrix are contained in the `Element` scope. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: matrix-sample +spec: + generators: + - matrix: + generators: + - gitRepository: + repositoryRef: go-demo-repo + files: + - path: examples/generation/dev.yaml + - path: examples/generation/production.yaml + - path: examples/generation/staging.yaml + - list: + elements: + - cluster: dev-cluster + version: 1.0.0 + templates: + - content: + kind: Kustomization + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + metadata: + name: "{{ .Element.env }}-demo" + labels: + app.kubernetes.io/name: go-demo + app.kubernetes.io/instance: "{{ .Element.env }}" + com.example/team: "{{ .Element.team }}" + com.example/cluster: "{{ .Element.cluster }}" + com.example/version: "{{ .Element.version }}" + spec: + interval: 5m + path: "./examples/kustomize/environments/{{ .Element.env }}" + prune: true + sourceRef: + kind: GitRepository + name: go-demo-repo +``` + +### apiClient generator + +This generator is configured to poll an HTTP endpoint and parse the result as the generated values. + +This will poll an endpoint on the interval, instead of using the simpler to use PullRequest generator, you can access GitHub's API with the APIClient generator. + +The PullRequest generator is simpler to use, and works across multiple different git-providers. + +The GitHub [documentation](https://docs.github.com/en/rest/pulls/pulls?apiVersion=2022-11-28#list-pull-requests) for the API endpoint shows: + +```shell +curl \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer "\ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/OWNER/REPO/pulls +``` +This can be translated into... 
+```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + labels: + app.kubernetes.io/name: gitopsset + app.kubernetes.io/instance: gitopsset-sample + app.kubernetes.io/part-of: gitopssets-controller + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: gitopssets-controller + name: api-client-sample +spec: + generators: + - apiClient: + interval: 5m + endpoint: https://api.github.com/repos/bigkevmcd/go-demo/pulls + headersRef: + name: github-secret + kind: Secret + templates: + - content: + apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: GitRepository + metadata: + name: "pr-{{ .Element.id | toJson}}-gitrepository" + namespace: default + spec: + interval: 5m0s + url: "{{ .Element.head.repo.clone_url }}" + ref: + branch: "{{ .Element.head.ref }}" + - content: + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: "pr-{{ .Element.id | toJson }}-demo" + namespace: default + spec: + interval: 5m + path: "./examples/kustomize/environments/dev" + prune: true + targetNamespace: "{{ .Element.head.ref }}-ns" + sourceRef: + kind: GitRepository + name: "pr-{{ .Element.id | toJson }}-gitrepository" +``` +As with the [Pull Request generator](#pullrequests-generator), this also requires a secret token to be able to access the API + +We need to pass this as an HTTP header. +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: github-secret + namespace: default +type: Opaque +stringData: + Accept: application/vnd.github+json + Authorization: Bearer ghp_ + X-GitHub-Api-Version: "2022-11-28" +``` +The keys in the secret match the command-line example using curl. + +Unlike the Pull Request generator, you need to figure out the paths to the elements yourself. + +#### APIClient JSONPath + +Not all APIs return an array of JSON objects, sometimes it's nested within a result type structure e.g. + +```json +{ + "things": [ + { + "env": "dev", + "team": "dev-team" + }, + { + "env": "production", + "team": "opts-team" + }, + { + "env": "staging", + "team": "opts-team" + } + ] +} +``` +You can use JSONPath to extract the fields from this data... +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + labels: + app.kubernetes.io/name: gitopsset + app.kubernetes.io/instance: gitopsset-sample + app.kubernetes.io/part-of: gitopssets-controller + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: gitopssets-controller + name: api-client-sample +spec: + generators: + - apiClient: + interval: 5m + endpoint: https://api.example.com/demo + jsonPath: "{ $.things }" +``` +This will generate three maps for templates, with just the _env_ and _team_ keys. + +#### APIClient POST body + +Another piece of functionality in the APIClient generator is the ability to POST +JSON to the API. +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + labels: + app.kubernetes.io/name: gitopsset + app.kubernetes.io/instance: gitopsset-sample + app.kubernetes.io/part-of: gitopssets-controller + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: gitopssets-controller + name: api-client-sample +spec: + generators: + - apiClient: + interval: 5m + endpoint: https://api.example.com/demo + body: + name: "testing" + value: "testing2" +``` +This will send a request body as JSON (Content-Type "application/json") to the +server and interpret the result. 
+ +The JSON body sent will look like this: +```json +{"name":"testing","value":"testing2"} +``` + +#### APIClient simple results + +Instead of using the JSONPath to extract from a complex structure, you can configure the result to be a single element. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + labels: + app.kubernetes.io/name: gitopsset + app.kubernetes.io/instance: gitopsset-sample + app.kubernetes.io/part-of: gitopssets-controller + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: gitopssets-controller + name: api-client-sample +spec: + generators: + - apiClient: + singleElement: true + interval: 5m + endpoint: https://api.example.com/demo +``` +Whatever result is parsed from the API endpoint will be returned as a map in a single element. + +For generation, you might need to use the `repeat` mechanism to generate repeating results. + +### Cluster generator + +The cluster generator generates from in-cluster GitOpsCluster resources. + +For example, this `GitOpsSet` will generate a `Kustomization` resource for each cluster matching the [Label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: cluster-sample +spec: + generators: + - cluster: + selector: + matchLabels: + env: dev + team: dev-team + templates: + - content: + kind: Kustomization + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + metadata: + name: "{{ .Element.ClusterName }}-demo" + labels: + app.kubernetes.io/name: go-demo + app.kubernetes.io/instance: "{{ .Element.ClusterName }}" + com.example/team: "{{ .Element.ClusterLabels.team }}" + spec: + interval: 5m + path: "./examples/kustomize/environments/{{ .Element.ClusterLabels.env }}" + prune: true + sourceRef: + kind: GitRepository + name: go-demo-repo +``` + +The following fields are generated for each GitOpsCluster. + + - `ClusterName` the name of the cluster + - `ClusterNamespace` the namespace that this cluster is from + - `ClusterLabels` the labels from the metadata field on the GitOpsCluster + - `ClusterAnnotations` the annotations from the metadata field on the GitOpsCluster + +If the selector is not provided, all clusters from all namespaces will be returned: + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: cluster-sample +spec: + generators: + - cluster: {} +``` + +Otherwise if the selector is empty, no clusters will be generated: + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: cluster-sample +spec: + generators: + - cluster: + selector: {} +``` + +## Templating functions + +Currently, the [Sprig](http://masterminds.github.io/sprig/) functions are available in the templating, with some functions removed[^sprig] for security reasons. + +In addition, we also provide two additional functions: + + * sanitize - sanitises strings to be compatible with [Kubernetes DNS](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names) name requirements + * getordefault - gets a key from the `.Element` or defaults to another value. 
+ +The examples below assume an element that looks like this: +```json +{ + "team": "engineering dev" +} +``` + +### sanitize template function + +And a template that looks like this: +```yaml +kind: Service +metadata: + name: {{ sanitize .Element.team }}-demo +``` + +This would output: +```yaml +kind: Service +metadata: + name: engineeringdev-demo +``` + +### getordefault + +For template that looks like this: +```yaml +kind: Service +metadata: + name: {{ getordefault .Element "name" "defaulted" }}-demo +``` + +This would output: +```yaml +kind: Service +metadata: + name: defaulted-demo +``` + +If the _key_ to get does exist in the `.Element` it will be inserted, the "default" is only inserted if it doesn't exist. + +## Security + +**WARNING** generating resources and applying them directly into your cluster can be dangerous to the health of your cluster. + +This is especially true for the `GitRepository` generator, where it may not be obvious to the author of the files, or the author of the template the consequences of the template rendering. + +The default `ServiceAccount` that is used by the gitopssets-controller is extremely limited, and can not create resources, you will need to explicitly grant permissions to create any of the resources you declare in the template, missing permissions will appear in the controller logs. + +It is not recommended that you create a role with blanket permissions, under the right circumstances, someone could accidentally _or_ maliciously overwrite the cluster control-plane, which could be very dangerous. + +## Limiting via service-accounts + +You can configure the service-account that is used to create resources. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsSet +metadata: + name: matrix-sample +spec: + # the controller will impersonate this service account + serviceAccountName: test-sa + generators: + - list: + elements: + - env: dev + team: dev-team + - env: production + team: ops-team + - env: staging + team: ops-team + templates: + - content: + kind: Kustomization + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + metadata: + name: "{{ .Element.env }}-demo" + labels: + app.kubernetes.io/name: go-demo + app.kubernetes.io/instance: "{{ .Element.env }}" + com.example/team: "{{ .Element.team }}" + spec: + interval: 5m + path: "./examples/kustomize/environments/{{ .Element.env }}" + prune: true + sourceRef: + kind: GitRepository + name: go-demo-repo +``` + +## gitopsset-controller configuration + +The enabled generators can be configured via the `--enabled-generators` flag, which takes a comma separated list of generators to enable. + +The default is to enable all generators. + +For example to enable only the `List` and `GitRepository` generators: + +```yaml +--enabled-generators=List,GitRepository +``` + +When a GitOpsSet that uses disabled generators is created, the disabled generators will be silently ignored. 
+ +[^yaml]: These are written as YAML mappings +[^sprig]: The following functions are removed "env", "expandenv", "getHostByName", "genPrivateKey", "derivePassword", "sha256sum", "base", "dir", "ext", "clean", "isAbs", "osBase", "osDir", "osExt", "osClean", "osIsAbs" diff --git a/website/versioned_docs/version-0.24.0/gitopssets/installation.mdx b/website/versioned_docs/version-0.24.0/gitopssets/installation.mdx new file mode 100644 index 0000000000..f9c8137bd4 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/installation.mdx @@ -0,0 +1,63 @@ +--- +title: Installation +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Installation + +The gitopssets-controller can be installed in two ways: + +- As part of the Weave Gitops Enterprise installation. (installed by default) +- As a standalone installation using a Helm chart. + +The standalone installation can be useful for leaf clusters that don't have Weave Gitops Enterprise installed. + +## Prerequisites + +Before installing the gitopssets-controller, ensure that the following is installed: + +- flux + +## Installing the gitopssets-controller + +To install the gitopssets-controller using a Helm chart, use the following HelmRelease: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: gitopssets-system +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository +metadata: + name: weaveworks-artifacts-charts + namespace: gitopssets-system +spec: + interval: 1m + url: https://artifacts.wge.dev.weave.works/dev/charts +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: gitopssets-controller + namespace: gitopssets-system +spec: + interval: 10m + chart: + spec: + chart: gitopssets-controller + sourceRef: + kind: HelmRepository + name: weaveworks-artifacts-charts + namespace: gitopssets-system + version: 0.6.1 + install: + crds: CreateReplace + upgrade: + crds: CreateReplace +``` + +After adding the Namespace, HelmRepository and HelmRelease to a git repository synced by flux, commit the changes to complete the installation process. diff --git a/website/versioned_docs/version-0.24.0/gitopssets/intro.mdx b/website/versioned_docs/version-0.24.0/gitopssets/intro.mdx new file mode 100644 index 0000000000..6a89eac327 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/intro.mdx @@ -0,0 +1,42 @@ +--- +title: Introduction +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# GitOpsSets + +:::caution + +**This feature is in alpha and certain aspects will change** + +We're very excited for people to use this feature. +However, please note that some changes will be made to the API and behavior, +particularly to enhance security by implementing impersonation for more +fine-grained control over how the generated resources are applied. + +::: + +## Introduction + +GitOpsSets enable Platform Operators to have a single definition for an application for multiple environments and a fleet of clusters. A single definition can be used to generate the environment and cluster-specific configuration. + +As an example, we can take an application that needs to be deployed to various environments (Dev, Test, Prod) built by a fleet of clusters. Each of those environments + clusters requires a specialized configuration powering the same Application. 
With GitOpsSets and the generators you just declare the template, you want to use, the selector that will match the cluster of the inventory, and where to get the special configuration. + +GitOpsSets will create out of the single resource all the objects and Flux primitives that are required to successfully deploy this application. An operation that required the editing of 100s files can be done now with a single command. + +**The initial generators that are coming with the preview release are:** + +- [List Generator](./guide.mdx#list-generator): The simplest generator. Provide a list of Key/Value pairs that you want to feed the template with. +- [Git Generator](./guide.mdx#gitrepository-generator): Enable to extract a set of files (environment-specific configurations) from a Flux GitRepository, and make the contents of these available to the templates, this would let you have config in *app-dev.json*, *app-staging.json* and *app-production.json* for example, and the contents of these would be available to the templates. +- [Matrix Generator](./guide.mdx#matrix-generator): Combine slices of generators into the desired compounded input. +- [Pull request Generator](./guide.mdx#pullrequests-generator): Automatically discover open pull requests within a repository to generate a new deployment. +- [API Client Generator](./guide.mdx#apiclient-generator): Poll an HTTP endpoint and parse the result as the generated values. + + +## Use cases + +- Single application definition for different environments (EU-West, North America, Germany) +- Deployment of a single definition across fleet of clusters matching any cluster based on a label (Production) +- Separation of concerns between teams (Teams managing different artifacts flowing into a single definition via generators) diff --git a/website/versioned_docs/version-0.24.0/gitopssets/releases.mdx b/website/versioned_docs/version-0.24.0/gitopssets/releases.mdx new file mode 100644 index 0000000000..fd29ef3034 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/gitopssets/releases.mdx @@ -0,0 +1,24 @@ +--- +title: Releases +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Gitopssets Controller Releases + +## v0.8.0 +2023-04-13 + +- Add events recording to gitopssets +- Fix updating of ConfigMaps + +## v0.7.0 +2023-03-30 + +- Implement custom delimiters. + +## v0.6.1 +2023-03-20 + +- Implement optional list expansion \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/guides/assets/templates/capa-template.yaml b/website/versioned_docs/version-0.24.0/guides/assets/templates/capa-template.yaml new file mode 100644 index 0000000000..e727e654e5 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/guides/assets/templates/capa-template.yaml @@ -0,0 +1,92 @@ +apiVersion: templates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: aws-eks-dev + namespace: default + annotations: + templates.weave.works/inject-prune-annotation: "true" + templates.weave.works/add-common-bases: "true" + labels: + weave.works/template-type: cluster +spec: + description: AWS EKS Development Cluster + params: + - name: CLUSTER_NAME + description: The name for this cluster. + - name: AWS_REGION + description: AWS Region to create cluster + options: ["us-east-1", "eu-central-1", "eu-west-2", "us-west-2"] + - name: KUBERNETES_VERSION + description: EKS Kubernetes version to use + options: ["v1.19.8", "v1.20.7", "v1.21.2"] + - name: WORKER_MACHINE_COUNT + description: Number of worker nodes to create. 
+ resourcetemplates: + - contents: + - apiVersion: gitops.weave.works/v1alpha1 + kind: GitopsCluster + metadata: + name: "${CLUSTER_NAME}" + namespace: default + labels: + weave.works/capi: bootstrap + spec: + capiClusterRef: + name: "${CLUSTER_NAME}" + + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + name: ${CLUSTER_NAME} + namespace: default + labels: + weave.works/capi: bootstrap + spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: AWSManagedControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: AWSManagedControlPlane + name: ${CLUSTER_NAME}-control-plane + + - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: AWSManagedControlPlane + metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default + spec: + region: ${AWS_REGION} + sshKeyName: default + version: ${KUBERNETES_VERSION} + eksClusterName: ${CLUSTER_NAME} + + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachinePool + metadata: + name: ${CLUSTER_NAME}-pool-0 + namespace: default + spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + dataSecretName: "" + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AWSManagedMachinePool + name: ${CLUSTER_NAME}-pool-0 + + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AWSManagedMachinePool + metadata: + name: ${CLUSTER_NAME}-pool-0 + namespace: default + spec: {} diff --git a/website/versioned_docs/version-0.24.0/guides/cert-manager.md b/website/versioned_docs/version-0.24.0/guides/cert-manager.md new file mode 100644 index 0000000000..286b575ff8 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/guides/cert-manager.md @@ -0,0 +1,106 @@ +--- +title: Generating TLS certificates with cert-manager and Let's Encrypt +--- + +In this guide we will show you how to add cert-manager to a cluster bootstrapped with Weave GitOps, and how +to configure the use of Let's Encrypt to issue TLS certificates. + +### Pre-requisites + +- A Kubernetes cluster such as [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) cluster running a +[Flux-supported version of Kubernetes](https://fluxcd.io/docs/installation/#prerequisites) +- Weave GitOps is [installed](../installation/index.mdx) + +## What is cert-manager? + +[cert-manager](https://cert-manager.io/), a CNCF project, provides a way to automatically manage certificates +in Kubernetes and OpenShift clusters. "It will obtain certificates from a variety of Issuers, both popular public +Issuers as well as private Issuers, and ensure the certificates are valid and up-to-date, and will attempt to +renew certificates at a configured time before expiry". + +## Install cert-manager + +As cert-manager can be installed using a [Helm Chart](https://cert-manager.io/docs/installation/helm/), we can +simply create a `HelmRepository` and a `HelmRelease` to have Flux install everything. + +Commit the following to a location being reconciled by Flux. + +
Expand to see manifest contents + +```yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 1h + url: https://charts.jetstack.io +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 5m + chart: + spec: + chart: cert-manager + version: 1.8.0 + sourceRef: + kind: HelmRepository + name: cert-manager + namespace: cert-manager + interval: 1m + values: + installCRDs: true +``` + +
+ +:::note cert-manager version +At time of writing, cert manager v1.8.0 was the latest available release and a newer version may exist, please +ensure to check for updates. +::: + +Now that `cert-manager` is running, we can create a `ClusterIssuer` to represent the certificate authority +from which we will obtain signed certificates, in this example we are using Let's Encrypt. After changing +the email address, commit this to the same location as above. + +
Expand to see manifest contents + +```yaml +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + # You must replace this email address with your own. + # Let's Encrypt will use this to contact you about expiring + # certificates, and issues related to your account. + email: weave-gitops@example.tld + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource that will be used to store the account's private key. + name: letsencrypt-prod-account-key + solvers: + # Add a single challenge solver, HTTP01 using nginx + - http01: + ingress: + class: nginx +``` + +
+ +Once this `ClusterIssuer` resource is installed, the cluster is now configured to request and use certificates generated by Cert Manager. + +This could be manually requested through the creation of a [Certificate resource](https://cert-manager.io/docs/usage/certificate/#creating-certificate-resources) or configured to be automatic as shown in our [Configuring OIDC with Dex and GitHub](./setting-up-dex.md) guide. diff --git a/website/versioned_docs/version-0.24.0/guides/delivery.mdx b/website/versioned_docs/version-0.24.0/guides/delivery.mdx new file mode 100644 index 0000000000..55c199c935 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/guides/delivery.mdx @@ -0,0 +1,656 @@ +--- +title: Progressive Delivery using Flagger +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +[Flagger](https://docs.flagger.app/) is a progressive delivery operator for Kubernetes. It is +designed to reduce risks when introducing new software versions and to improve time to delivery +through automating production releases. Weave GitOps Enterprise's UI allows you to view the state of +these progressive delivery rollouts, and how they are configured using Flagger's +[canary](https://docs.flagger.app/usage/how-it-works#canary-resource) object, through the +Applications > Delivery view. + +![Applications Delivery view](/img/dashboard-applications-delivery.png) + +This guide uses Flux manifests to install Flagger and Linkerd. Flagger can work with a number of +service meshes and ingress controllers, to support various progressive delivery [deployment +strategies](https://docs.flagger.app/usage/deployment-strategies): + +![canary release icon](/img/canary.svg) **Canary Release** - where traffic is gradually shifted to +the new version and its performance is assessed. Based on this analysis of KPIs a release is either +promoted or the update abandoned. +![a b testing icon](/img/ab.svg) **A/B Testing** - uses HTTP headers or cookies to ensure users + stay on the same version of an application during the canary analysis. +![blue green testing icon](/img/blue-green.svg) **Blue/Green** - where tests are run against the + new version, and if successful, traffic is then switched from the current service. +![blue green mirroring icon](/img/mirroring.svg) **Blue/Green with Traffic Mirroring** - sends + copies of incoming requests to the new version. The user receives the response from the current + service and the other is discarded. The new version is promoted only if metrics are healthy. + +Using Flux allows us to manage our cluster applications in a declarative way through changes in a +Git repository. + +In this guide, we will walk you through a full end-to-end scenario where you will: +- [Install the Linkerd service mesh](#installing-linkerd-using-flux) +- [Install Flagger](#installing-flagger-using-flux) +- [Deploy a sample application using a canary release strategy based on metrics provided through + Linkerd's in-built Prometheus instance](#deploy-a-canary-release) + +## Prerequisites +- This guide assumes you already have a Kubernetes cluster running and have bootstrapped Flux. To + apply the manifests listed in this guide, you will need to commit them to a repository being + reconciled with Flux. For help installing Flux, you can follow their [getting + started](https://fluxcd.io/docs/get-started/) documentation. +- Flagger requires the `autoscaling/v2` or `autoscaling/v2beta2` API to be installed on the cluster, you can use `kubectl + api-resources` to check which API versions are supported. +- The [step](https://smallstep.com/cli/) CLI installed to generate certificates in order to support + mTLS connections. + +## Installing Linkerd using Flux + +For the Linkerd installation, a Kustomization file will be used. This will allow us to specify the +installation order and the default namespace for the installed resources but also to easily generate +Secrets from certificate files via the use of a `secretGenerator`. + +In order to support mTLS connections between meshed pods, Linkerd requires a trust anchor +certificate and an issuer certificate with its corresponding key. These certificates are +automatically created when the `linkerd install` command is used but when using a Helm chart to +install Linkerd, these certificates need to be provided. 
The `step` CLI allows us to generate these +certificates. + +To generate the trust anchor certificate run: +```bash +step certificate create root.linkerd.cluster.local ca.crt ca.key \ +--profile root-ca --no-password --insecure +``` + +To generate the issuer certificate run: +```bash +step certificate create identity.linkerd.cluster.local issuer.crt issuer.key \ +--profile intermediate-ca --not-after 8760h --no-password --insecure \ +--ca ca.crt --ca-key ca.key +``` + +Add the `ca.crt`, `issuer.crt` and `issuer.key` files to the cluster repository under a `linkerd` +directory. + +To control where the Linkerd components get installed, we need to add a Namespace resource: + +Now let's add the resources for Linkerd components under the `./linkerd` +directory. These are: +- A `Namespace` resource to control where the components are installed +- A `HelmRepository` resource to make the Linkerd helm repo available on the + cluster +- A `HelmRelease` resource to install the latest version of Linkerd from that + `HelmRepository` + +
Expand to see the Linkerd manifests + +```yaml title="linkerd/namespace.yaml" +--- +apiVersion: v1 +kind: Namespace +metadata: + name: linkerd + labels: + config.linkerd.io/admission-webhooks: disabled +``` + +```yaml title="linkerd/source.yaml" +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: linkerd +spec: + interval: 1h + url: https://helm.linkerd.io/stable +``` + +:::tip +The value for the `spec.values.identity.issuer.crtExpiry` field below depends on the parameter value +used during the creation of the issuer certificate previously. In this example, it should be set to +1 year from the certificate creation. +::: + +```yaml title="linkerd/releases.yaml" {35} +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: linkerd +spec: + interval: 10m + chart: + spec: + chart: linkerd2 + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: linkerd + install: + crds: Create + upgrade: + crds: CreateReplace + valuesFrom: + - kind: Secret + name: linkerd-certs + valuesKey: ca.crt + targetPath: identityTrustAnchorsPEM + - kind: Secret + name: linkerd-certs + valuesKey: issuer.crt + targetPath: identity.issuer.tls.crtPEM + - kind: Secret + name: linkerd-certs + valuesKey: issuer.key + targetPath: identity.issuer.tls.keyPEM + values: + installNamespace: false + identity: + issuer: + crtExpiry: "2023-07-18T20:00:00Z" # Change this to match generated certificate expiry date +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: linkerd-viz +spec: + interval: 10m + dependsOn: + - name: linkerd + chart: + spec: + chart: linkerd-viz + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: linkerd +``` + +
+ +Next, add the following file to instruct Kustomize to patch any `Secrets` that are referenced in +`HelmRelease` manifests, and add a `Kustomization` which references all the +other `linkerd` resource files. + +
Expand to see the linkerd Kustomization manifests + +```yaml title="linkerd/kustomizeconfig.yaml" +nameReference: + - kind: Secret + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease +``` + +```yaml title="linkerd/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: linkerd +configurations: +- kustomizeconfig.yaml +resources: +- namespace.yaml +- source.yaml +- releases.yaml +secretGenerator: + - name: linkerd-certs + files: + - ca.crt + - issuer.crt + - issuer.key +``` + +:::tip +The `secretGenerator` is used to generate Secrets from the generated files. +::: + +
+ +At this point `linkerd` directory in the cluster repository should look like this: + +```bash +> tree linkerd +linkerd +├── ca.crt +├── issuer.crt +├── issuer.key +├── kustomization.yaml +├── kustomizeconfig.yaml +├── namespace.yaml +├── releases.yaml +└── source.yaml +``` + +Once Flux reconciles this directory to the cluster, Linkerd should be installed. + +Before proceeding to the next step, check that all the Linkerd pods have started successfully: + +```bash +> kubectl get pods -n linkerd +NAME READY STATUS RESTARTS AGE +linkerd-destination-66d5668b-4mw49 4/4 Running 0 10m +linkerd-identity-6b4658c74b-6nc97 2/2 Running 0 10m +linkerd-proxy-injector-6b76789cb4-8vqj4 2/2 Running 0 10m + +> kubectl get pods -n linkerd-viz +NAME READY STATUS RESTARTS AGE +grafana-db56d7cb4-xlnn4 2/2 Running 0 10m +metrics-api-595c7b564-724ps 2/2 Running 0 10m +prometheus-5d4dffff55-8fscd 2/2 Running 0 10m +tap-6dcb89d487-5ns8n 2/2 Running 0 10m +tap-injector-54895654bb-9xn7k 2/2 Running 0 10m +web-6b6f65dbc7-wltdg 2/2 Running 0 10m +``` + +:::info Note +Make sure that any new directories that you add to the cluster repository as part of this guide, +are included in a path that Flux reconciles. +::: + + +## Installing Flagger using Flux + +For the Flagger installation, a Kustomization file will be used to define the installation order and +provide a default namespace for the installed resources. + +Create a new `flagger` directory and make sure it is under a repository path that Flux reconciles. + +We'll add the resources for Flagger under this directory. +These are: +- A `Namespace` resource to control where the components are installed +- A `HelmRepository` resource to make the Flagger helm repo available on the + cluster +- A `HelmRelease` resource to install the latest version of Flagger and the load + tester app, which is used to generate from that synthetic traffic during the + analysis phase, from that `HelmRepository` + +
Expand to see the Flagger resource manifests + +```yaml title="flagger/namespace.yaml" +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flagger +``` + +```yaml title="flagger/source.yaml" +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: flagger +spec: + interval: 1h + url: https://flagger.app +``` + +```yaml title="flagger/releases.yaml" +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: flagger +spec: + releaseName: flagger + install: + crds: Create + upgrade: + crds: CreateReplace + interval: 10m + chart: + spec: + chart: flagger + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: flagger + values: + metricsServer: http://prometheus.linkerd-viz:9090 + meshProvider: linkerd +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: loadtester +spec: + interval: 10m + chart: + spec: + chart: loadtester + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: flagger +``` + +
+ +Finally, add the following Kustomization file that references all the previous files that were +added: + +
Expand to see the Flagger Kustomization manifest + +```yaml title="flagger/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flagger +resources: +- namespace.yaml +- source.yaml +- releases.yaml +``` + +
+ +The `flagger` directory in the cluster repository should look like this: + +```bash +> tree flagger +flagger +├── kustomization.yaml +├── namespace.yaml +├── releases.yaml +└── source.yaml +``` + +Once Flux reconciles this directory to the cluster, Flagger and the load tester app should get +installed. + +Before proceeding to the next step, check that all the Flagger pods have started successfully: + +```bash +> kubectl get pods -n flagger +NAME READY STATUS RESTARTS AGE +flagger-7d456d4fc7-knf2g 1/1 Running 0 4m +loadtester-855b4d77f6-scl6r 1/1 Running 0 4m +``` + +## Deploy a canary release + +To demonstrate the progressive rollout of an application, +[podinfo](https://github.com/stefanprodan/podinfo) will be used. + +We will configure a [Canary release +strategy](https://docs.flagger.app/usage/deployment-strategies#canary-release), where Flagger will +scale up a new version of the application (the canary), alongside the existing version (the +primary), and gradually increase traffic to the new version in increments of 5%, up to a maximum of +50%. It will continuously monitor the new version for an acceptable request response rate and +average request duration. Based on this analysis, Flagger will either update the primary to the new +version, or abandon the promotion; then scale the canary back down to zero. + +Create a new `test` directory and add the following Canary resources under it: +- A `Namespace` resource to control where the components are installed +- A `Deployment` and `HorizontalPodAutoscaler` for the `podinfo` application +- A `Canary` resource which references the `Deployment` and + `HorizontalPodAutoscaler` resources (note that we do not need to define a + service resource, instead this is specified within the Canary definition and + created by Flagger) + +
Expand to see the Canary resource manifests + +```yaml title="test/namespace.yaml" +--- +apiVersion: v1 +kind: Namespace +metadata: + name: test + annotations: + linkerd.io/inject: enabled +``` + +```yaml title="test/deployment.yaml" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: podinfo + labels: + app: podinfo +spec: + minReadySeconds: 5 + revisionHistoryLimit: 5 + progressDeadlineSeconds: 60 + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: podinfo + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9797" + labels: + app: podinfo + spec: + containers: + - name: podinfod + image: ghcr.io/stefanprodan/podinfo:6.1.8 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 9898 + protocol: TCP + - name: http-metrics + containerPort: 9797 + protocol: TCP + - name: grpc + containerPort: 9999 + protocol: TCP + command: + - ./podinfo + - --port=9898 + - --port-metrics=9797 + - --grpc-port=9999 + - --grpc-service-name=podinfo + - --level=info + - --random-delay=false + - --random-error=false + env: + - name: PODINFO_UI_COLOR + value: "#34577c" + livenessProbe: + exec: + command: + - podcli + - check + - http + - localhost:9898/healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + exec: + command: + - podcli + - check + - http + - localhost:9898/readyz + initialDelaySeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 2000m + memory: 512Mi + requests: + cpu: 100m + memory: 64Mi + +--- +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: podinfo +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: podinfo + minReplicas: 2 + maxReplicas: 4 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + # scale up if usage is above + # 99% of the requested CPU (100m) + averageUtilization: 99 +``` + +```yaml title="test/canary.yaml" +--- +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + name: podinfo +spec: + # deployment reference + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: podinfo + # HPA reference (optional) + autoscalerRef: + apiVersion: autoscaling/v2beta2 + kind: HorizontalPodAutoscaler + name: podinfo + # the maximum time in seconds for the canary deployment + # to make progress before it is rollback (default 600s) + progressDeadlineSeconds: 60 + service: + # ClusterIP port number + port: 9898 + # container port number or name (optional) + targetPort: 9898 + analysis: + # schedule interval (default 60s) + interval: 30s + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + # Linkerd Prometheus checks + metrics: + - name: request-success-rate + # minimum req success rate (non 5xx responses) + # percentage (0-100) + thresholdRange: + min: 99 + interval: 1m + - name: request-duration + # maximum req duration P99 + # milliseconds + thresholdRange: + max: 500 + interval: 30s + # testing (optional) + webhooks: + - name: acceptance-test + type: pre-rollout + url: http://loadtester.flagger/ + timeout: 30s + metadata: + type: bash + cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token" + - name: load-test + type: rollout + url: http://loadtester.flagger/ + metadata: + cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/" +``` + +
+ +Finally, add a Kustomization file to apply all resources to the `test` namespace: + +
Expand to see the Canary Kustomization manifest + +```yaml title="test/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: test +resources: +- namespace.yaml +- deployment.yaml +- canary.yaml +``` + +
+
+At this point, the `test` directory in the cluster repository should look like this:
+
+```bash
+> tree test
+test
+├── canary.yaml
+├── deployment.yaml
+├── kustomization.yaml
+└── namespace.yaml
+```
+
+Commit and push these changes so that Flux reconciles them to the cluster.
+
+After a short time, the status of the canary object should be set to `Initialized`:
+
+![Canary rollout initialized](/img/pd-details-initialized.png)
+
+```bash
+> kubectl get canary podinfo -n test
+NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
+podinfo   Initialized   0        2022-07-22T12:37:58Z
+```
+
+Now trigger a new rollout by updating the version of the `podinfo` image:
+
+```bash
+> kubectl set image deployment/podinfo podinfod=ghcr.io/stefanprodan/podinfo:6.0.1 -n test
+```
+
+During the progressive rollout, the canary object reports on its current status:
+
+![Canary rollout progressing](/img/pd-details-progressing.png)
+
+```bash
+> kubectl get canary podinfo -n test
+NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
+podinfo   Progressing   5        2022-07-22T12:41:57Z
+```
+
+After a short time, the rollout completes and the status of the canary object is set to
+`Succeeded`:
+
+![Canary rollout succeeded](/img/pd-details-succeeded.png)
+
+```bash
+> kubectl get canary podinfo -n test
+NAME      STATUS      WEIGHT   LASTTRANSITIONTIME
+podinfo   Succeeded   0        2022-07-22T12:47:58Z
+```
+
+## Summary
+
+Congratulations, you have now completed a progressive delivery rollout with Flagger and Linkerd
+:tada:
+
+Next steps:
+- Explore more of what [Flagger](https://flagger.app/) can offer
+- Configure [manual approval](flagger-manual-gating.mdx) for progressive delivery deployments
diff --git a/website/versioned_docs/version-0.24.0/guides/deploying-capa.mdx b/website/versioned_docs/version-0.24.0/guides/deploying-capa.mdx
new file mode 100644
index 0000000000..44d2452971
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/guides/deploying-capa.mdx
@@ -0,0 +1,73 @@
+---
+title: Deploying CAPA with EKS
+hide_title: true
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+import TierLabel from "../_components/TierLabel";
+import CodeBlock from "@theme/CodeBlock";
+import BrowserOnly from "@docusaurus/BrowserOnly";
+

+ {frontMatter.title} +

+
+## Creating your first CAPA Cluster
+
+:::note BEFORE YOU START
+
+Make sure the following software is installed before continuing with these instructions:
+
+- `github cli` >= 2.3.0 [(source)](https://cli.github.com/)
+- `kubectl` [(source)](https://kubernetes.io/docs/tasks/tools/#kubectl)
+- `eksctl` [(source)](https://github.com/weaveworks/eksctl/releases)
+- `aws cli` [(source)](https://aws.amazon.com/cli/)
+- `clusterctl` >= v1.0.1 [(source)](https://github.com/kubernetes-sigs/cluster-api/releases)
+- `clusterawsadm` >= v1.1.0 [(source)](https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases)
+
+The `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` of a user should be configured either via `aws configure` or exported in the current shell.
+The `GITHUB_TOKEN` should be set as an environment variable in the current shell. It should have permissions to create Pull Requests against the cluster config repo.
+:::
+
+If you've followed the [Installation guide](installation/weave-gitops-enterprise/index.mdx) you should have a management cluster ready to roll.
+
+### 1. Configure a CAPI provider
+
+See the [Cluster API Providers](cluster-management/cluster-api-providers.mdx) page for more details on providers. Here we'll continue with `eks` and `capa` as an example.
+
+```bash
+# Enable support for `ClusterResourceSet`s for automatically installing CNIs
+export EXP_EKS=true
+export EXP_MACHINE_POOL=true
+export CAPA_EKS_IAM=true
+export EXP_CLUSTER_RESOURCE_SET=true
+
+clusterctl init --infrastructure aws
+```
+
+### 2. Add a template
+
+See the [CAPI Templates](gitops-templates/intro.mdx) page for more details on this topic. Once we load a template we can use it in the UI to create clusters!
+
+import CapaTemplate from "!!raw-loader!./assets/templates/capa-template.yaml";
+
+Download the template below to your config repository path, then commit and push to your git origin.
+
+
+  {() => (
+    
+      curl -o clusters/management/capi/templates/capa-template.yaml{" "}
+      {window.location.protocol}//{window.location.host}
+      {require("./assets/templates/capa-template.yaml").default}
+    
+  )}
+
+
+
+  {CapaTemplate}
+
diff --git a/website/versioned_docs/version-0.24.0/guides/displaying-custom-metadata.mdx b/website/versioned_docs/version-0.24.0/guides/displaying-custom-metadata.mdx
new file mode 100644
index 0000000000..c13055911c
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/guides/displaying-custom-metadata.mdx
@@ -0,0 +1,65 @@
+---
+title: Displaying Custom Metadata
+---
+
+Weave GitOps lets you add annotations with custom metadata to your
+Flux automations and sources, and they will be displayed in the main UI.
+
+For example, you might use this to add links to dashboards, issue
+systems, or documentation and comments that you wish to be directly visible in
+the GitOps UI.
+
+We will use the `podinfo` application that we installed in the [getting
+started guide](../../getting-started/deploy) as an example. Open up the
+podinfo kustomization and add annotations to it so it looks like this:
+
+```yaml title="./clusters/my-cluster/podinfo-kustomization.yaml"
+---
+apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+  name: podinfo
+  namespace: flux-system
+// highlight-start
+  annotations:
+    metadata.weave.works/description: |
+      Podinfo is a tiny web application made with Go that showcases best practices of running microservices in Kubernetes.
+      Podinfo is used by CNCF projects like Flux and Flagger for end-to-end testing and workshops.
+      metadata.weave.works/grafana-dashboard: https://grafana.my-org.example.com/d/podinfo-dashboard
+// highlight-end
+spec:
+  interval: 5m0s
+  path: ./kustomize
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: podinfo
+  targetNamespace: flux-system
+```
+
+Close the file, and commit and push your changes.
+
+Back in your GitOps dashboard, navigate to the 'Applications' tab and select the
+`podinfo` kustomization. At the bottom of the 'Details' section you will see the
+new 'Metadata' entries:
+
+![Application detail view showing custom metadata](/img/metadata-display.png)
+
+:::caution Restrictions
+
+ * The annotation key **must** start with the domain
+   `metadata.weave.works`. Any other annotations will be ignored.
+ * The key that will be displayed is whatever you put after the
+   domain, title cased, and with dashes replaced with spaces. Above,
+   `metadata.weave.works/grafana-dashboard` was displayed as "Grafana Dashboard".
+ * The value can be either a link or plain text. Newlines in
+   plain text will be respected.
+ * The key is subject to certain limitations that Kubernetes imposes on
+   annotations, including:
+   - it must be shorter than 63 characters (not including
+     the domain)
+   - it must consist of English alphanumeric characters, or the characters `-`, `_`, and `.`.
+   - See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set)
+     for the full list of restrictions.
+
+:::
diff --git a/website/versioned_docs/version-0.24.0/guides/flagger-manual-gating.mdx b/website/versioned_docs/version-0.24.0/guides/flagger-manual-gating.mdx
new file mode 100644
index 0000000000..0c95c1e8b9
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/guides/flagger-manual-gating.mdx
@@ -0,0 +1,146 @@
+---
+title: Manual Approval for Progressive Delivery Deployments
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+

+ {frontMatter.title} +

+
+Weave GitOps Enterprise helps you understand the state of progressive delivery
+updates to your applications with [Flagger](https://flagger.app). The Delivery
+view shows all your deployed `Canary` objects and the status for how a rollout
+is progressing.
+
+By default, Flagger will automatically promote a new version of an application
+should it pass the defined checks during an analysis phase. However, you can
+also configure [webhooks](https://docs.flagger.app/usage/webhooks) to enable
+manual approvals for Flagger to proceed to the next phase of a rollout.
+
+In this guide we will show you how to get started with manually gating a
+progressive delivery promotion with Flagger, using the in-built load tester as
+a way to demonstrate and learn the capability so that you can configure
+your own gates.
+
+## Pre-requisites
+- Basic knowledge of [Flagger](https://flagger.app).
+- An existing `Canary` object and target deployment.
+- Flagger's load tester [installed](https://docs.flagger.app/usage/webhooks#load-testing).
+
+## Basic introduction to Webhooks and Gating
+Flagger can be configured to work with several types of hooks which will be called at
+given stages during a progressive delivery rollout. Some of these allow you to manually
+gate whether a rollout proceeds at certain points:
+- Before a new deployment is scaled up and canary analysis begins with `confirm-rollout`.
+- Before traffic weight is increased with `confirm-traffic-increase`.
+- Before a new version is promoted following successful canary analysis with `confirm-promotion`.
+
+Any URL can be used as a webhook target: Flagger will proceed if it returns a
+`200 OK` status code, and halt if it returns `403 Forbidden`.
+
+The webhook will receive a JSON payload that can be unmarshaled as
+`CanaryWebhookPayload`:
+
+```go
+type CanaryWebhookPayload struct {
+    // Name of the canary
+    Name string `json:"name"`
+
+    // Namespace of the canary
+    Namespace string `json:"namespace"`
+
+    // Phase of the canary analysis
+    Phase CanaryPhase `json:"phase"`
+
+    // Metadata (key-value pairs) for this webhook
+    Metadata map[string]string `json:"metadata,omitempty"`
+}
+```
+
+For more information on Webhooks in Flagger, see the
+[Flagger documentation](https://docs.flagger.app/usage/webhooks).
+
+
+## Using Flagger's load tester to manually gate a promotion
+To enable manual approval of a promotion we are going to configure the
+`confirm-promotion` webhook to call a particular gate provided through
+Flagger's included load tester. This is an easy way to experiment with
+the capability using Flagger's included components.
+
+**Important note**
+We strongly recommend that you DO NOT USE the load tester for manual gating
+in a production environment. There is no auth on the load tester, so
+anyone with access to the cluster would be able to open and close the gates; and
+the load tester has no storage, so if it is restarted, all gates would close.
+
+Instead, configure these webhooks for appropriate integration with a
+tool of your choice such as Jira, Slack, Jenkins, etc.
+
+### Configure the confirm-promotion webhook
+In your Canary object, add the following in the `analysis` section:
+
+```yaml
+  analysis:
+    webhooks:
+      - name: "ask for confirmation"
+        type: confirm-promotion
+        url: http://flagger-loadtester.test/gate/check
+```
+
+This gate is closed by default.
+
+### Deploy a new version of your application
+Trigger a Canary rollout by updating your target deployment/daemonset, for
+example by bumping the container image tag. 
A full list of ways to trigger
+a rollout is available
+[here](https://docs.flagger.app/faq#how-to-retry-a-failed-release).
+
+You can watch the progression of a Canary in Weave GitOps Enterprise (WGE)
+through the Applications > Delivery view:
+
+![Podinfo Canary progressing](/img/pd-table-progressing.png)
+
+
+### Wait for the Canary analysis to complete
+Once the Canary analysis has successfully completed, Flagger will call the
+`confirm-promotion` webhook and change status to `WaitingPromotion`, as you
+can see in the screenshots below:
+
+![Podinfo Canary showing Waiting Promotion - table view](/img/pd-table-waiting.png)
+
+![Podinfo Canary showing Waiting Promotion - details view](/img/pd-details-waiting.png)
+
+### Open the gate
+To open the gate, and therefore confirm that you are happy for the new
+version of your application to be promoted, exec into the load tester
+container:
+
+```
+$ kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
+
+# to open
+> curl -d '{"name": "app","namespace":"test"}' http://localhost:8080/gate/open
+```
+
+Flagger will now proceed to promote the Canary version to the primary and
+complete the progressive delivery rollout :tada:
+
+![Podinfo Canary succeeded - full events history](/img/pd-events-gate-passed.png)
+
+![Podinfo Canary succeeded - promoting](/img/pd-table-promoting.png)
+
+![Podinfo Canary succeeded - promoted](/img/pd-table-succeeded.png)
+
+
+To manually close the gate again, you can issue:
+
+```
+> curl -d '{"name": "app","namespace":"test"}' http://localhost:8080/gate/close
+```
+
+**References:**
+
+* This guide was informed by the
+[Official Flagger documentation](https://docs.flagger.app/usage/webhooks#manual-gating)
diff --git a/website/versioned_docs/version-0.24.0/guides/setting-up-dex.md b/website/versioned_docs/version-0.24.0/guides/setting-up-dex.md
new file mode 100644
index 0000000000..00d92922fb
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/guides/setting-up-dex.md
@@ -0,0 +1,291 @@
+---
+title: Configuring OIDC with Dex and GitHub
+---
+
+In this guide we will show you how to enable users to log in to the Weave GitOps dashboard by authenticating with their GitHub account.
+
+This example uses [Dex][tool-dex] and its GitHub connector, and assumes Weave GitOps has already been installed on a Kubernetes cluster.
+
+### Pre-requisites
+
+- A Kubernetes cluster, such as a [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) cluster, running a
+[Flux-supported version of Kubernetes](https://fluxcd.io/docs/installation/#prerequisites)
+- Weave GitOps is [installed](../installation/index.mdx) and [TLS has been enabled](../configuration/tls.md).
+
+## What is Dex?
+
+[Dex][tool-dex] is an identity service that uses [OpenID Connect][oidc] to
+drive authentication for other apps.
+
+Alternative solutions for identity and access management exist, such as [Keycloak](https://www.keycloak.org/).
+
+[tool-dex]: https://dexidp.io/
+[oidc]: https://openid.net/connect/
+
+## Create Dex namespace
+
+Create a namespace where Dex will be installed:
+
+```yaml
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: dex
+```
+
+## Add credentials
+
+There are a [lot of options][dex-connectors] available with Dex; in this guide we will
+use the [GitHub connector][dex-github].
+
+We can get a GitHub ClientID and Client secret by creating a
+[new OAuth application][github-oauth]. 
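+
+When registering the OAuth application, the authorization callback URL should point at Dex's
+`/callback` endpoint. Assuming the example issuer configured later in this guide, that would be
+`https://dex.dev.example.tld/callback` (substitute your own hostname).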
+ +![GitHub OAuth configuration](/img/guides/setting-up-dex/github-oauth-application.png) + +```bash +kubectl create secret generic github-client \ + --namespace=dex \ + --from-literal=client-id=${GITHUB_CLIENT_ID} \ + --from-literal=client-secret=${GITHUB_CLIENT_SECRET} +``` + +[dex-connectors]: https://dexidp.io/docs/connectors/ +[dex-github]: https://dexidp.io/docs/connectors/github/ +[github-oauth]: https://docs.github.com/en/developers/apps/building-oauth-apps/creating-an-oauth-app + +## Deploy Dex + +As we did before, we can use `HelmRepository` and `HelmRelease` objects to let +Flux deploy everything. + +
Expand to see resource manifests + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository +metadata: + name: dex + namespace: dex +spec: + interval: 1m + url: https://charts.dexidp.io +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: dex + namespace: dex +spec: + interval: 5m + chart: + spec: + chart: dex + version: 0.6.5 + sourceRef: + kind: HelmRepository + name: dex + namespace: dex + interval: 1m + values: + image: + tag: v2.31.0 + envVars: + - name: GITHUB_CLIENT_ID + valueFrom: + secretKeyRef: + name: github-client + key: client-id + - name: GITHUB_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: github-client + key: client-secret + config: + # Set it to a valid URL + issuer: https://dex.dev.example.tld + + # See https://dexidp.io/docs/storage/ for more options + storage: + type: memory + + staticClients: + - name: 'Weave GitOps Core' + id: weave-gitops + secret: AiAImuXKhoI5ApvKWF988txjZ+6rG3S7o6X5En + redirectURIs: + - 'https://localhost:9001/oauth2/callback' + - 'https://0.0.0.0:9001/oauth2/callback' + - 'http://0.0.0.0:9001/oauth2/callback' + - 'http://localhost:4567/oauth2/callback' + - 'https://localhost:4567/oauth2/callback' + - 'http://localhost:3000/oauth2/callback' + + connectors: + - type: github + id: github + name: GitHub + config: + clientID: $GITHUB_CLIENT_ID + clientSecret: $GITHUB_CLIENT_SECRET + redirectURI: https://dex.dev.example.tld/callback + orgs: + - name: weaveworks + teams: + - team-a + - team-b + - QA + - name: ww-test-org + ingress: + enabled: true + className: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + hosts: + - host: dex.dev.example.tld + paths: + - path: / + pathType: ImplementationSpecific + tls: + - hosts: + - dex.dev.example.tld + secretName: dex-dev-example-tld +``` + +
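+
+Once Flux has reconciled these objects, you can verify that the Dex release is healthy before
+continuing. A minimal check (the release and namespace names match the manifests above):
+
+```bash
+flux get helmreleases --namespace dex
+kubectl get pods --namespace dex
+```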
+ +:::note SSL certificate without cert manager +If we don't want to use cert manager, we can remove the related annotation and +use our predefined secret in the `tls` section. +::: + +An important part of the configuration is the `orgs` field on the GitHub +connector. + +```yaml +orgs: +- name: weaveworks + teams: + - team-a + - team-b + - QA +``` + +Here we can define groups under a GitHub organisation. In this example the +GitHub organisation is `weaveworks` and all members of the `team-a`, +`team-b`, and `QA` teams can authenticate. Group membership will be added to +the user. + +Based on these groups, we can bind roles to groups: + +
Expand to see group role bindings + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: wego-test-user-read-resources + namespace: flux-system +subjects: + - kind: Group + name: weaveworks:QA + namespace: flux-system +roleRef: + kind: Role + name: wego-admin-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: wego-admin-role + namespace: flux-system +rules: + - apiGroups: [""] + resources: ["secrets", "pods" ] + verbs: [ "get", "list" ] + - apiGroups: ["apps"] + resources: [ "deployments", "replicasets"] + verbs: [ "get", "list" ] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: ["buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories"] + verbs: ["get", "list", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] +``` + +
+
+In the same way, we can bind cluster roles to a group:
+
+
Expand to see group cluster role bindings + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: weaveworks:team-a +subjects: +- kind: Group + name: weaveworks:team-a + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +``` + +
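+
+With Dex running and RBAC in place, the dashboard still needs to be pointed at the OIDC provider.
+The sketch below assumes the standard `oidc-auth` secret that Weave GitOps reads its OIDC
+configuration from, and reuses the issuer, client ID, client secret, and redirect URI from the Dex
+values above (all of them example values you should replace):
+
+```bash
+kubectl create secret generic oidc-auth \
+  --namespace flux-system \
+  --from-literal=issuerURL=https://dex.dev.example.tld \
+  --from-literal=clientID=weave-gitops \
+  --from-literal=clientSecret=AiAImuXKhoI5ApvKWF988txjZ+6rG3S7o6X5En \
+  --from-literal=redirectURL=https://localhost:9001/oauth2/callback
+```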
+ +### Set up static user + +For static user, add `staticPasswords` to the `config`: + +```yaml +spec: + values: + config: + staticPasswords: + - email: "admin@example.tld" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" + username: "admin" + userID: "08a8684b-db88-4b73-90a9-3cd1661f5466" +``` + +A static user password can be generated with the `gitops` CLI: + +```bash +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash +$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q +``` + +## OIDC login + +Using the "Login with OIDC Provider" button: + +![Login page](/img/guides/setting-up-dex/oidc-login.png) + +We have to authorize the GitHub OAuth application: + +![GitHub OAuth page](/img/guides/setting-up-dex/github-auth.png) + +After that, grant access to Dex: + +![Dex grant access](/img/guides/setting-up-dex/dex-auth.png) + +Now we are logged in with our GitHub user and we can see all resources we have +access to: + +![UI logged in](/img/guides/setting-up-dex/ui-logged-in.png) diff --git a/website/versioned_docs/version-0.24.0/guides/using-terraform-templates.mdx b/website/versioned_docs/version-0.24.0/guides/using-terraform-templates.mdx new file mode 100644 index 0000000000..498921bd16 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/guides/using-terraform-templates.mdx @@ -0,0 +1,331 @@ +--- +title: Using Terraform templates +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +This guide will show you how to use a template to create a Terraform resource in Weave GitOps Enterprise. + +## CLI guide + +### Pre-requisites +- Install [Weave GitOps Enterprise](installation/weave-gitops-enterprise/index.mdx) with [TF-Controller installed](installation/weave-gitops-enterprise/index.mdx#optional-install-the-tf-controller) and [TLS enabled](../configuration/tls.md). + +### 1. Add a template to your cluster + +Add the following template to a path in your Git repository that is synced by Flux. For example, in the [Installation guide](installation/weave-gitops-enterprise/index.mdx#install-flux-onto-your-cluster-with-the-flux-bootstrap-command), we set the path that is synced by Flux to `./clusters/management`. + +Commit and push these changes. Once a template is available in the cluster, it can be used to create a resource, which will be shown in the next step. + +
Expand to see ./clusters/management/tf-template.yaml
+
+```yaml title="./clusters/management/tf-template.yaml"
+---
+apiVersion: clustertemplates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+  name: sample-wge-tf-controller-template
+  namespace: default
+spec:
+  description:
+    This is a sample WGE template that will be translated into a tf-controller specific template.
+  params:
+    - name: RESOURCE_NAME
+      description: Resource Name
+  resourcetemplates:
+    - content:
+      - apiVersion: infra.contrib.fluxcd.io/v1alpha1
+        kind: Terraform
+        metadata:
+          name: ${RESOURCE_NAME}
+          namespace: flux-system
+        spec:
+          interval: 1h
+          path: ./
+          approvePlan: auto
+          alwaysCleanupRunnerPod: true
+          sourceRef:
+            kind: GitRepository
+            name: flux-system
+            namespace: flux-system
+```
+
+
+ +Verify that your template is in the cluster: +```bash +kubectl get gitopstemplates.clustertemplates.weave.works -A +NAME AGE +sample-wge-tf-controller-template 14m +``` + +If the template does not appear immediately, reconcile the changes with Flux: +```bash +flux reconcile kustomization flux-system +► annotating Kustomization flux-system in flux-system namespace +✔ Kustomization annotated +◎ waiting for Kustomization reconciliation +✔ applied revision main/e6f5f0c3925bcfecdb50bceb12af9a87677d2213 +``` + +### 2. Use the template to create a resource +A resource can be created from a template by specifying the template's name and supplying values to it, as well as your Weave GitOps Enterprise username, password, and HTTP API endpoint. +```bash +gitops add terraform --from-template sample-wge-tf-controller-template \ +--set="RESOURCE_NAME"="name" \ +--username= --password= \ +--endpoint https://localhost:8000 \ +--url https://github.com/myawesomeorg/myawesomerepo + +Created pull request: https://github.com/myawesomeorg/myawesomerepo/pull/5 +``` + +This will create a PR in your Git repository with a TF-Controller manifest. Once the PR is merged, TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest! + +This template can be used to create multiple resources out of the same Terraform manifest by supplying different values to the template. Any changes to the Terraform manifest will be reconciled automatically to all resources. + +### 3. List available templates +Get a specific template that can be used to create a Terraform resource: +```bash +gitops get template terraform sample-wge-tf-controller-template --endpoint https://localhost:8000 --username= --password= +NAME PROVIDER DESCRIPTION ERROR +sample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template. +``` + +List all the templates available on the cluster: +```bash +gitops get template terraform --endpoint https://localhost:8000 --username= --password= +NAME PROVIDER DESCRIPTION ERROR +sample-aurora-tf-template This is a sample Aurora RDS template. +sample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template. +``` + +### 4. List the parameters of a template +List all the parameters that can be defined on a specific template: +```bash +gitops get template terraform tf-controller-aurora --list-parameters --endpoint https://localhost:8000 --username= --password= +NAME REQUIRED DESCRIPTION OPTIONS +RESOURCE_NAME false Resource Name +``` + +## Use Case: Create an Aurora RDS with WGE +:::tip BONUS + +For a more advanced example, here is a template to create an Aurora RDS cluster using WGE with Flux and the TF-Controller. +::: + +### Pre-requisites +- Everything from the [previous section](#pre-requisites) +- Get (or create) an AWS Access Key ID and Secret Access Key. Check the [AWS docs](https://docs.aws.amazon.com/powershell/latest/userguide/pstools-appendix-sign-up.html) for details on how to do this. +- Create an AWS IAM Role for the Terraform AWS Provider. Its policy should include `iam:CreateRole`. More info [here](https://support.hashicorp.com/hc/en-us/articles/360041289933-Using-AWS-AssumeRole-with-the-AWS-Terraform-Provider). + +### 1. Configure a way to manage secrets + +Configure a way to safely store Secrets. 
One method is to use the Mozilla SOPS CLI, but there are other ways, such as Sealed Secrets or Vault.
+
+Follow the steps in the [Flux docs](https://fluxcd.io/docs/guides/mozilla-sops/) **except** for the "Configure in-cluster secrets decryption" step! This step looks slightly different for WGE. Instead of re-creating the controllers, you can configure the `kustomize-controller` as instructed below.
+
+In your Git repository source, add the following to your `kustomize-controller` configuration:
+```bash
+cat << EOF >> ./clusters/management/flux-system/gotk-sync.yaml
+    decryption:
+      provider: sops
+      secretRef:
+        name: sops-gpg
+EOF
+```
+
+### 2. Encrypt and store your credentials in your Git repository
+Create a Secret to store sensitive values such as the following:
+- DB username
+- DB password
+- AWS Access Key ID
+- AWS Secret Access Key
+- AWS Role ARN
+
+:::note
+If following the Flux guide, this step corresponds to ["Encrypting secrets using OpenPGP"](https://fluxcd.io/docs/guides/mozilla-sops/#encrypting-secrets-using-openpgp). You can stop following the Flux guide at this step.
+:::
+
+For example, here is what you would do if using the SOPS method:
+```bash
+kubectl -n flux-system create secret generic tf-controller-auth \
+--from-literal=master_username=admin \
+--from-literal=master_password=change-me \
+--from-literal=aws_access_key=AKIAIOSFODNN7EXAMPLE \
+--from-literal=aws_secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
+--from-literal=aws_role_arn="arn:aws:iam::012345678910:role/wge-tf-controller-example" \
+--dry-run=client \
+-o yaml > tf-controller-auth.yaml
+```
+
+Then, encrypt the secret:
+```bash
+sops --encrypt --in-place tf-controller-auth.yaml
+```
+
+Commit and push your changes. You can now store encrypted secrets in your Git repository.
+
+### 3. Add the manifests to your cluster
+
+Add the following Terraform manifest to the root of your Git repository.
+
+
Expand to see Terraform manifest + +```yaml title="./rds.tf" +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 3.0" + } + } +} + +variable "cluster_identifier" {} +variable "database_name" {} +variable "master_username" {} +variable "master_password" {} +variable "backup_retention_period" {} +variable "region" {} +variable "aws_access_key" {} +variable "aws_secret_key" {} +variable "aws_role_arn" {} + +provider "aws" { + region = var.region + access_key = var.aws_access_key + secret_key = var.aws_secret_key + + assume_role { + role_arn = var.aws_role_arn + } +} + +locals { + engine = "aurora-mysql" + engine_version = "5.7.mysql_aurora.2.07.5" + port = 3306 +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "group-name" + values = [var.region] + } +} + +resource "aws_rds_cluster" "mycluster" { + cluster_identifier = var.cluster_identifier + engine = local.engine + engine_version = local.engine_version + port = local.port + availability_zones = slice(data.aws_availability_zones.available.names, 0, 3) + database_name = var.database_name + master_username = var.master_username + master_password = var.master_password + backup_retention_period = var.backup_retention_period + skip_final_snapshot = true + apply_immediately = true +} + +resource "aws_rds_cluster_instance" "cluster_instance" { + count = 1 + identifier = "${aws_rds_cluster.mycluster.id}-${count.index}" + cluster_identifier = aws_rds_cluster.mycluster.id + instance_class = "db.t3.small" + engine = aws_rds_cluster.mycluster.engine + engine_version = aws_rds_cluster.mycluster.engine_version +} +``` + +
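+
+Note that the variable names declared in this manifest (`master_username`, `master_password`,
+`aws_access_key`, `aws_secret_key`, `aws_role_arn`) match the keys of the `tf-controller-auth`
+secret created earlier; the template in the next step passes that secret to the Terraform object
+via `varsFrom`, which is how these values reach the provider configuration.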
+ +Add the following template to a path in your Git repository that is synced by Flux. In the [quickstart guide](installation/weave-gitops-enterprise/index.mdx#install-flux-onto-your-cluster-with-the-flux-bootstrap-command), we set this path to `./clusters/management`. + +
Expand to see Terraform manifest at +./clusters/management/rds-template.yaml + +```yaml title="./clusters/management/rds-template.yaml" +--- +apiVersion: clustertemplates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: rds-template + namespace: default +spec: + description: This is a sample Aurora RDS template. + params: + - name: RESOURCE_NAME + description: Resource Name + - name: CLUSTER_IDENTIFIER + description: Cluster Identifier + - name: DATABASE_NAME + description: Database Name + - name: BACKUP_RETENTION_PERIOD + description: Backup Retention Period + - name: REGION + description: Region + resourcetemplates: + - contents: + - apiVersion: infra.contrib.fluxcd.io/v1alpha1 + kind: Terraform + metadata: + name: ${RESOURCE_NAME} + namespace: flux-system + spec: + interval: 1h + path: ./ + approvePlan: auto + alwaysCleanupRunnerPod: true + vars: + - name: cluster_identifier + value: ${CLUSTER_IDENTIFIER} + - name: database_name + value: ${DATABASE_NAME} + - name: backup_retention_period + value: ${BACKUP_RETENTION_PERIOD} + - name: region + value: ${REGION} + varsFrom: + - kind: Secret + name: tf-controller-auth + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system +``` + +
+
+Commit and push your changes.
+
+:::tip
+You can change the location where you keep your Terraform manifests in your Git source (which the TF-Controller will reconcile) by configuring `spec.resourcetemplates.spec.path`.
+:::
+
+### 4. Use the template to create the RDS
+```bash
+gitops add terraform --from-template rds-template \
+--username= --password= \
+--endpoint https://localhost:8000 \
+--url https://github.com/myawesomeorg/myawesomerepo \
+--set "RESOURCE_NAME"="tf-controller-aurora","CLUSTER_IDENTIFIER"="super-awesome-aurora","DATABASE_NAME"="db1","BACKUP_RETENTION_PERIOD"=5,"REGION"="us-west-2"
+
+Created pull request: https://github.com/myawesomeorg/myawesomerepo/pull/6
+```
+
+Merge the PR in your Git repository to add the TF-Controller manifest. TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest.
+
+Any changes to your Terraform manifest will be automatically reconciled by the TF-Controller with Flux.
+
+You can re-use this template to create multiple Terraform resources, each with a different set of values!
+
+Make sure to delete the newly created RDS resources so that you do not incur additional costs.
diff --git a/website/versioned_docs/version-0.24.0/installation/aws-marketplace.mdx b/website/versioned_docs/version-0.24.0/installation/aws-marketplace.mdx
new file mode 100644
index 0000000000..02a7eae6dc
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/installation/aws-marketplace.mdx
@@ -0,0 +1,206 @@
+---
+title: AWS Marketplace
+hide_title: true
+pagination_next: getting-started/ui
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+## AWS Marketplace
+Weave GitOps is also available via the AWS Marketplace.
+
+The following steps will allow you to deploy the Weave GitOps product to an EKS cluster via a Helm Chart.
+
+These instructions presume you already have installed [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/),
+[`eksctl`](https://github.com/weaveworks/eksctl), [`helm`](https://github.com/helm/helm) and
+the [Helm S3 Plugin](https://github.com/hypnoglow/helm-s3).
+
+### Step 1: Subscribe to Weave GitOps on the AWS Marketplace
+
+To deploy the managed Weave GitOps solution, first subscribe to the product on [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-vkn2wejad2ix4).
+
+_Note: it may take ~20 minutes for your Subscription to become live and deployable._
+
+### Step 2: Configure an EKS Cluster
+
+
+
+
+If you do not have a cluster on EKS, you can use [`eksctl`](https://github.com/weaveworks/eksctl) to create one.
+
+Copy the contents of the sample file below into `cluster-config.yaml` and replace the placeholder values with your settings.
+See the [`eksctl` documentation](https://eksctl.io/) for more configuration options.
+
+
Expand for file contents + +```yaml title="cluster-config.yaml" +--- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: CLUSTER_NAME # Change this + region: REGION # Change this + +# This section is required +iam: + withOIDC: true + serviceAccounts: + - metadata: + name: wego-service-account # Altering this will require a corresponding change in a later command + namespace: flux-system + roleOnly: true + attachPolicy: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "aws-marketplace:RegisterUsage" + Resource: '*' + +# This section will create a single Managed nodegroup with one node. +# Edit or remove as desired. +managedNodeGroups: +- name: ng1 + instanceType: m5.large + desiredCapacity: 1 +``` + +
+ +Create the cluster: + +```bash +eksctl create cluster -f cluster-config.yaml +``` + +
+ + +In order to use the Weave GitOps container product, +your cluster must be configured to run containers with the correct IAM Policies. + +The recommended way to do this is via [IRSA](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/). + +Use this `eksctl` configuration below (replacing the placeholder values) to: +- Associate an OIDC provider +- Create the required service account ARN + +Save the example below as `oidc-config.yaml` + +
Expand for file contents + +```yaml title="oidc-config.yaml" +--- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: CLUSTER_NAME # Change this + region: REGION # Change this + +# This section is required +iam: + withOIDC: true + serviceAccounts: + - metadata: + name: wego-service-account # Altering this will require a corresponding change in a later command + namespace: flux-system + roleOnly: true + attachPolicy: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "aws-marketplace:RegisterUsage" + Resource: '*' +``` + +
+ +```bash +eksctl utils associate-iam-oidc-provider -f oidc-config.yaml --approve +eksctl create iamserviceaccount -f oidc-config.yaml --approve +``` + +
+
+ + +### Step 3: Fetch the Service Account Role ARN +First retrieve the ARN of the IAM role which you created for the `wego-service-account`: + +```bash +# replace the placeholder values with your configuration +# if you changed the service account name from wego-service-account, update that in the command +export SA_ARN=$(eksctl get iamserviceaccount --cluster --region | awk '/wego-service-account/ {print $3}') + +echo $SA_ARN +# should return +# arn:aws:iam:::role/eksctl--addon-iamserviceaccount-xxx-Role1-1N41MLVQEWUOF +``` + +_This value will also be discoverable in your IAM console, and in the Outputs of the Cloud Formation +template which created it._ + +### Step 4: Install Weave GitOps + +Copy the Chart URL from the Usage Instructions in AWS Marketplace, or download the file from the Deployment template to your workstation. + +To be able to log in to your new installation, you need to set up authentication. Create a new file `values.yaml` where you set your username, and a bcrypt hash of your desired password, like so: + +```yaml title="./values.yaml" +gitops: + adminUser: + create: true + username: + passwordHash: +``` + +Then install it: + + + + +```console +helm install wego \ + --namespace=flux-system \ + --create-namespace \ + --set serviceAccountRole="$SA_ARN" \ + --values ./values.yaml +``` + + + + +```console +helm install wego \ + --namespace=flux-system \ + --create-namespace \ + --set serviceAccountName='' \ + --set serviceAccountRole="$SA_ARN" \ + --values ./values.yaml +``` + + + + +### Step 5: Check your installation + +Run the following from your workstation: + +```console +kubectl get pods -n flux-system +# you should see something like the following returned +flux-system helm-controller-5b96d94c7f-tds9n 1/1 Running 0 53s +flux-system kustomize-controller-8467b8b884-x2cpd 1/1 Running 0 53s +flux-system notification-controller-55f94bc746-ggmwc 1/1 Running 0 53s +flux-system source-controller-78bfb8576-stnr5 1/1 Running 0 53s +flux-system wego-metering-f7jqp 1/1 Running 0 53s +flux-system ww-gitops-weave-gitops-5bdc9f7744-vkh65 1/1 Running 0 53s +``` + +Your Weave GitOps installation is now ready! + +## Next steps + +In our following [Get Started document](../getting-started/ui.mdx), we will walk you through logging into the GitOps Dashboard and deploying an application. diff --git a/website/versioned_docs/version-0.24.0/installation/index.mdx b/website/versioned_docs/version-0.24.0/installation/index.mdx new file mode 100644 index 0000000000..9f8355d634 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/installation/index.mdx @@ -0,0 +1,32 @@ +--- +title: Installation +hide_title: true +--- + +# Part 0: Installing Weave GitOps + +## Check your Cluster's Kubernetes Version + +No matter which version of Weave GitOps you install, having a Kubernetes cluster up +and running is required. This version of Weave GitOps is tested against the following +[Kubernetes releases](https://kubernetes.io/releases/): + +| Kubernetes Release | End of Life | +| ------------------ | ----------- | +| 1.25 | 2023-10-27 | +| 1.24 | 2023-07-28 | +| 1.23 | 2023-02-28 | +| 1.22 | 2022-10-28 | + +Note that the version of [Flux](https://fluxcd.io/docs/installation/#prerequisites) that you use might impose further minimum version requirements. 
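+
+If you are not sure which Kubernetes version your cluster is running, you can check it with
+`kubectl` (a quick sketch; the exact flags and output format depend on your kubectl release):
+
+```bash
+kubectl version --short
+```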
+
+## Deploy Weave GitOps to your Cluster
+
+Depending on your setup and requirements, choose one of the following installation options:
+
+| Installation |
+| ------------------------------------------------------ |
+| [Weave GitOps OSS](weave-gitops.mdx) |
+| [Weave GitOps Enterprise](weave-gitops-enterprise) |
+| [Weave GitOps Enterprise - Airgap Environments](weave-gitops-enterprise/airgap) |
+| [AWS Marketplace](aws-marketplace.mdx) |
diff --git a/website/versioned_docs/version-0.24.0/installation/weave-gitops-enterprise/airgap.mdx b/website/versioned_docs/version-0.24.0/installation/weave-gitops-enterprise/airgap.mdx
new file mode 100644
index 0000000000..336e9244ee
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/installation/weave-gitops-enterprise/airgap.mdx
@@ -0,0 +1,578 @@
+---
+title: Airgap Environments
+hide_title: true
+toc_max_heading_level: 4
+pagination_next: getting-started/ui
+---
+
+import TierLabel from "../../_components/TierLabel";
+
+# Install in Airgap Environments
+
+From [wikipedia](https://en.wikipedia.org/wiki/Air_gap_(networking)):
+
+>An air gap, air wall, air gapping or disconnected network is a network security measure employed on one or more computers
+to ensure that a secure computer network is physically isolated from unsecured networks, such as the public Internet or an unsecured local area network...
+
+This document guides you through installing Weave GitOps Enterprise in a restricted environment.
+
+# Before You Start
+
+There are several kinds of restrictions that can apply in an airgap environment. This guide assumes that you have egress network
+restrictions, so the artifacts needed to install Weave GitOps Enterprise (WGE) must be loaded
+from a private registry. This guide helps you identify the Helm charts
+and container images required to install WGE and load them into your private registry.
+
+It also assumes that you can prepare the installation from a proxy host. A proxy host is defined here
+as a computer that is able to access both the public and the private network. It could take different shapes;
+for example, it could be a bastion host, a corp laptop, etc.
+
+Access to both the public and the private network is required during the airgap installation, but not simultaneously.
+The installation has an online stage, to gather the artifacts, followed by an offline stage,
+to load the artifacts into the private network.
+
+Finally, we aim to provide an end-to-end example to be used as guidance more than as a recipe. Feel free to adapt the details
+that do not fit your context.
+
+# Install
+
+There are different variations of the following stages and conditions. Installing
+Weave GitOps Enterprise in an airgap environment generally follows these stages:
+
+1. Setup a WGE install environment.
+2. Collect artifacts and publish to a private registry.
+3. Install Weave GitOps Enterprise in the airgap environment.
+
+## Setup a WGE install environment
+
+The main goal of this stage is to recreate Weave GitOps Enterprise locally within your context, in order to collect
+the container images and Helm charts that will be required in your private registry for the offline installation.
+
+A three-step setup is followed:
+
+1. Setup a proxy host
+2. Setup a private registry
+3. Install Weave GitOps Enterprise
+
+### Setup a proxy host
+
+There are many possible configurations for this host. This guide assumes that the host has the following installed:
+
+- [docker](https://www.docker.com/) as container runtime.
+- [kubectl and kind](https://kubernetes.io/docs/tasks/tools)
+- [helm](https://helm.sh/docs/intro/install/)
+- [skopeo](https://github.com/containers/skopeo) to manage container images
+- [flux](https://fluxcd.io/flux/cmd/) to bootstrap Flux in the environment.
+- [clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl) to replicate the cluster management
+capabilities.
+
+#### Create Kind Cluster
+
+Create a kind cluster with a local registry by following [this guide](https://kind.sigs.k8s.io/docs/user/local-registry/).
+
+#### Install Flux
+
+You can simply run `flux install` to install Flux into your kind cluster.
+
+#### Setup a helm repo
+
+We are going to install [ChartMuseum](https://chartmuseum.com/) via Flux.
+
+Remember to also install the Helm plugin
+[cm-push](https://github.com/chartmuseum/helm-push).
+
+
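+
+If you don't already have the plugin, it can be installed straight from its repository (a sketch,
+assuming the proxy host still has internet access during this online stage):
+
+```bash
+helm plugin install https://github.com/chartmuseum/helm-push
+```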
Expand to see installation yaml + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: chartmuseum + namespace: flux-system +spec: + interval: 10m + url: https://chartmuseum.github.io/charts +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: chartmuseum + namespace: flux-system +spec: + chart: + spec: + chart: chartmuseum + sourceRef: + kind: HelmRepository + name: chartmuseum + namespace: flux-system + interval: 10m0s + timeout: 10m0s + releaseName: helm-repo + install: + crds: CreateReplace + remediation: + retries: 3 + values: + env: + open: + DISABLE_API: "false" + AUTH_ANONYMOUS_GET: "true" +``` + +
+
+Set up access from your host.
+
+```bash
+#expose kubernetes svc
+kubectl -n flux-system port-forward svc/helm-repo-chartmuseum 8080:8080 &
+
+#add hostname
+sudo -- sh -c "echo 127.0.0.1 helm-repo-chartmuseum >> /etc/hosts"
+
+```
+Test that you can reach it:
+```bash
+#add repo to helm
+helm repo add private http://helm-repo-chartmuseum:8080
+
+#test that it works
+helm repo update private
+```
+
+At this stage you already have a private registry for container images and Helm charts.
+
+### Install Weave GitOps Enterprise
+
+This step gathers the artifacts and images in your local environment so they can be pushed to the private registry.
+
+#### Cluster API
+
+This will vary depending on the provider. Given that we target an offline environment, we are most likely in
+a private cloud environment, so we will be using [liquidmetal](https://weaveworks-liquidmetal.github.io/site/docs/tutorial-basics/capi/).
+
+Export these environment variables to configure the CAPI experience. Adjust them to your context.
+
+```shell
+export CAPI_BASE_PATH=/tmp/capi
+export CERT_MANAGER_VERSION=v1.9.1
+export CAPI_VERSION=v1.3.0
+export CAPMVM_VERSION=v0.7.0
+export EXP_CLUSTER_RESOURCE_SET=true
+export CONTROL_PLANE_MACHINE_COUNT=1
+export WORKER_MACHINE_COUNT=1
+export CONTROL_PLANE_VIP="192.168.100.9"
+export HOST_ENDPOINT="192.168.1.130:9090"
+```
+
+Execute the following script to generate the `clusterctl` config file.
+
+```shell
+cat << EOF > clusterctl.yaml
+cert-manager:
+  url: "$CAPI_BASE_PATH/cert-manager/$CERT_MANAGER_VERSION/cert-manager.yaml"
+
+providers:
+  - name: "microvm"
+    url: "$CAPI_BASE_PATH/infrastructure-microvm/$CAPMVM_VERSION/infrastructure-components.yaml"
+    type: "InfrastructureProvider"
+  - name: "cluster-api"
+    url: "$CAPI_BASE_PATH/cluster-api/$CAPI_VERSION/core-components.yaml"
+    type: "CoreProvider"
+  - name: "kubeadm"
+    url: "$CAPI_BASE_PATH/bootstrap-kubeadm/$CAPI_VERSION/bootstrap-components.yaml"
+    type: "BootstrapProvider"
+  - name: "kubeadm"
+    url: "$CAPI_BASE_PATH/control-plane-kubeadm/$CAPI_VERSION/control-plane-components.yaml"
+    type: "ControlPlaneProvider"
+EOF
+```
+Execute `make` using the following Makefile to initialise CAPI in your cluster:
+
+
Expand to see Makefile contents + +```makefile +.PHONY := capi + +capi: capi-init capi-cluster + +capi-init: cert-manager cluster-api bootstrap-kubeadm control-plane-kubeadm microvm clusterctl-init + +cert-manager: + mkdir -p $(CAPI_BASE_PATH)/cert-manager/$(CERT_MANAGER_VERSION) + curl -L https://github.com/cert-manager/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml --output $(CAPI_BASE_PATH)/cert-manager/$(CERT_MANAGER_VERSION)/cert-manager.yaml + +cluster-api: + mkdir -p $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION) + curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/core-components.yaml --output $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)/core-components.yaml + curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)/metadata.yaml + +bootstrap-kubeadm: + mkdir -p $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION) + curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/bootstrap-components.yaml --output $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)/bootstrap-components.yaml + curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)/metadata.yaml + +control-plane-kubeadm: + mkdir -p $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION) + curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/control-plane-components.yaml --output $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)/control-plane-components.yaml + curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)/metadata.yaml + +microvm: + mkdir -p $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION) + curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/infrastructure-components.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/infrastructure-components.yaml + curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/cluster-template-cilium.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/cluster-template-cilium.yaml + curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/metadata.yaml + +clusterctl-init: + clusterctl init --wait-providers -v 4 --config clusterctl.yaml --infrastructure microvm + +capi-cluster: + clusterctl generate cluster --config clusterctl.yaml -i microvm:$(CAPMVM_VERSION) -f cilium lm-demo | kubectl apply -f - +``` + +
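+
+With `clusterctl.yaml` and this Makefile in the same directory, running `make` executes the default
+`capi` target, which initialises the providers and then generates the example workload cluster. If
+you only want to initialise the providers at this point, you can run the `capi-init` target on its
+own (both targets are defined in the Makefile above):
+
+```bash
+# initialise cert-manager, core CAPI, the kubeadm providers and the microvm provider
+make capi-init
+
+# or run the full flow, including the example liquidmetal workload cluster
+make
+```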
+
+#### TF-Controller
+
+Apply the following example manifest to deploy the Terraform Controller (TF-Controller):
+
+
Expand to see file contents + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: tf-controller + namespace: flux-system +spec: + interval: 10m + url: https://weaveworks.github.io/tf-controller/ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: tf-controller + namespace: flux-system +spec: + chart: + spec: + chart: tf-controller + version: "0.9.2" + sourceRef: + kind: HelmRepository + name: tf-controller + namespace: flux-system + interval: 10m0s + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + crds: CreateReplace +``` + +
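+
+As with the other Helm releases, you can confirm that the controller was deployed successfully
+before continuing (a sketch; the release name and namespace match the manifest above):
+
+```bash
+flux get helmreleases tf-controller --namespace flux-system
+```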
+
+#### Weave GitOps Enterprise
+
+Update the following manifest to match your context.
+
+
Expand to see file contents + +```yaml {4-7,19-20} +--- +apiVersion: v1 +data: + deploy-key: + entitlement: + password: + username: +kind: Secret +metadata: + labels: + kustomize.toolkit.fluxcd.io/name: shared-secrets + kustomize.toolkit.fluxcd.io/namespace: flux-system + name: weave-gitops-enterprise-credentials + namespace: flux-system +type: Opaque +--- +apiVersion: v1 +data: + password: + username: +kind: Secret +metadata: + labels: + kustomize.toolkit.fluxcd.io/name: enterprise + kustomize.toolkit.fluxcd.io/namespace: flux-system + name: cluster-user-auth + namespace: flux-system +type: Opaque +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: weave-gitops-enterprise-charts + namespace: flux-system +spec: + interval: 10m + secretRef: + name: weave-gitops-enterprise-credentials + url: https://charts.dev.wkp.weave.works/releases/charts-v3 +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops-enterprise + namespace: flux-system +spec: + chart: + spec: + chart: mccp + version: "0.10.2" + sourceRef: + kind: HelmRepository + name: weave-gitops-enterprise-charts + namespace: flux-system + interval: 10m0s + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + crds: CreateReplace + values: + global: + capiEnabled: true + enablePipelines: true + enableTerraformUI: true + clusterBootstrapController: + enabled: true + cluster-controller: + controllerManager: + kubeRbacProxy: + image: + repository: gcr.io/kubebuilder/kube-rbac-proxy + tag: v0.8.0 + manager: + image: + repository: docker.io/weaveworks/cluster-controller + tag: v1.4.1 + policy-agent: + enabled: true + image: weaveworks/policy-agent + pipeline-controller: + controller: + manager: + image: + repository: ghcr.io/weaveworks/pipeline-controller + images: + clustersService: docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2 + uiServer: docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2 + clusterBootstrapController: weaveworks/cluster-bootstrap-controller:v0.4.0 +``` + +
+
+At this stage you have a local management cluster with Weave GitOps Enterprise installed.
+
+```bash
+➜ kubectl get pods -A
+NAMESPACE NAME READY STATUS RESTARTS AGE
+...
+flux-system weave-gitops-enterprise-cluster-controller-6f8c69dc8-tq994 2/2 Running 5 (12h ago) 13h
+flux-system weave-gitops-enterprise-mccp-cluster-bootstrap-controller-cxd9c 2/2 Running 0 13h
+flux-system weave-gitops-enterprise-mccp-cluster-service-8485f5f956-pdtxw 1/1 Running 0 12h
+flux-system weave-gitops-enterprise-pipeline-controller-85b76d95bd-2sw7v 1/1 Running 0 13h
+...
+```
+
+You can observe the installed Helm charts with `kubectl`:
+
+```bash
+kubectl get helmcharts.source.toolkit.fluxcd.io
+NAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS
+flux-system-cert-manager cert-manager 0.0.7 HelmRepository weaveworks-charts 13h True pulled 'cert-manager' chart with version '0.0.7'
+flux-system-tf-controller tf-controller 0.9.2 HelmRepository tf-controller 13h True pulled 'tf-controller' chart with version '0.9.2'
+flux-system-weave-gitops-enterprise mccp v0.10.2 HelmRepository weave-gitops-enterprise-charts 13h True pulled 'mccp' chart with version '0.10.2'
+```
+
+As well as the container images:
+
+```bash
+
+kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec['containers','initContainers'][*].image}" |tr -s '[[:space:]]' '\n' \
+| sort | uniq | grep -vE 'kindest|etcd|coredns'
+
+docker.io/prom/prometheus:v2.34.0
+docker.io/weaveworks/cluster-controller:v1.4.1
+docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2
+docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2
+ghcr.io/fluxcd/flagger-loadtester:0.22.0
+ghcr.io/fluxcd/flagger:1.21.0
+ghcr.io/fluxcd/helm-controller:v0.23.1
+ghcr.io/fluxcd/kustomize-controller:v0.27.1
+ghcr.io/fluxcd/notification-controller:v0.25.2
+...
+```
+
+## Collect and publish artifacts
+
+This section guides you through pushing the installed artifacts to your private registry.
+The following Makefile is supplied to help you with each stage.
+
+
Expand to see Makefile contents + +```makefile {4-6} +.PHONY := all + +#set these variable with your custom configuration +PRIVATE_HELM_REPO_NAME=private +REGISTRY=localhost:5001 +WGE_VERSION=0.10.2 + +WGE=mccp-$(WGE_VERSION) +WGE_CHART=$(WGE).tgz + +all: images charts + +charts: pull-charts push-charts + +images: + kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec['containers','initContainers'][*].image}" \ + |tr -s '[[:space:]]' '\n' | sort | uniq | grep -vE 'kindest|kube-(.*)|etcd|coredns' | xargs -L 1 -I {} ./image-sync.sh {} $(REGISTRY) + kubectl get microvmmachinetemplates --all-namespaces -o jsonpath="{.items[*].spec.template.spec.kernel.image}"|tr -s '[[:space:]]' '\n' \ + | sort | uniq | xargs -L 1 -I {} ./image-sync.sh {} $(REGISTRY) + +pull-charts: + curl -L https://s3.us-east-1.amazonaws.com/weaveworks-wkp/releases/charts-v3/$(WGE_CHART) --output $(WGE_CHART) + +push-charts: + helm cm-push -f $(WGE_CHART) $(PRIVATE_HELM_REPO_NAME) +``` + +
+
+The `image-sync.sh` referenced in the `images` target of the above Makefile
+is similar to:
+
+```shell
+skopeo copy docker://$1 docker://$2/$1 --preserve-digests --multi-arch=all
+```
+
+>[Skopeo](https://github.com/containers/skopeo) allows you to configure a range of security features to meet your requirements.
+For example, you can configure trust policies before pulling, or sign containers before making them available in your private network.
+Feel free to adapt the previous script to meet your security needs.
+
+1. Configure the environment variables to your context.
+2. Execute `make` to automatically sync Helm charts and container images.
+
+```bash
+➜ resources git:(docs-airgap-install) ✗ make
+kubectl get microvmmachinetemplates --all-namespaces -o jsonpath="{.items[*].spec.template.spec.kernel.image}"|tr -s '[[:space:]]' '\n' \
+  | sort | uniq | xargs -L 1 -I {} ./image-sync.sh {} docker-registry:5000
+
+5.10.77: Pulling from weaveworks-liquidmetal/flintlock-kernel
+Digest: sha256:5ef5f3f5b42a75fdb69cdd8d65f5929430f086621e61f00694f53fe351b5d466
+Status: Image is up to date for ghcr.io/weaveworks-liquidmetal/flintlock-kernel:5.10.77
+ghcr.io/weaveworks-liquidmetal/flintlock-kernel:5.10.77
+...5.10.77: digest: sha256:5ef5f3f5b42a75fdb69cdd8d65f5929430f086621e61f00694f53fe351b5d466 size: 739
+```
+
+## Airgap Install
+
+### Weave GitOps Enterprise
+At this stage your private registry contains both the Helm charts and the container images required to install Weave GitOps
+Enterprise. Now you are ready to install WGE from your private registry.
+
+Follow the instructions to [install WGE](https://docs.gitops.weave.works/docs/installation/#installing-weave-gitops-enterprise)
+with the following considerations:
+
+1. Adjust the HelmReleases' `spec.chart.spec.sourceRef` to tell Flux to pull Helm charts from your Helm repo.
+2. Adjust the HelmReleases' `spec.values` to use the container images from your private registry.
+
+An example of how this looks for Weave GitOps Enterprise is shown below.
+
+
Expand to view example WGE manifest + +```yaml title="weave-gitops-enterprise.yaml" {21-24,32} +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: weave-gitops-enterprise-charts + namespace: flux-system +spec: + interval: 1m + url: http://helm-repo-chartmuseum:8080 +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops-enterprise + namespace: flux-system +spec: + chart: + spec: + chart: mccp + version: "0.10.2" + sourceRef: + kind: HelmRepository + name: weave-gitops-enterprise-charts + namespace: flux-system + interval: 1m0s + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + crds: CreateReplace + values: + global: + capiEnabled: true + enablePipelines: true + enableTerraformUI: true + clusterBootstrapController: + enabled: true + #images changed + cluster-controller: + controllerManager: + kubeRbacProxy: + image: + repository: localhost:5001/gcr.io/kubebuilder/kube-rbac-proxy + tag: v0.8.0 + manager: + image: + repository: localhost:5001/docker.io/weaveworks/cluster-controller + tag: v1.4.1 + policy-agent: + enabled: true + image: localhost:5001/weaveworks/policy-agent + pipeline-controller: + controller: + manager: + image: + repository: localhost:5001/ghcr.io/weaveworks/pipeline-controller + images: + clustersService: localhost:5001/docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2 + uiServer: localhost:5001/docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2 + clusterBootstrapController: localhost:5001/weaveworks/cluster-bootstrap-controller:v0.4.0 +``` + +
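+
+Once Flux reconciles the adjusted `HelmRelease`, it is worth confirming that the chart really came from your private Helm repository and that the workloads run images from your private registry. A quick check, assuming the `flux` CLI is installed and the names used in the example above:
+
+```bash
+flux get sources helm -n flux-system
+flux get helmreleases -n flux-system
+kubectl get deploy -n flux-system \
+  -o jsonpath="{.items[*].spec.template.spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u
+```
+
+Every image listed should be prefixed with your private registry host.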
+ +### Cluster API + +Indicate in the cluster api configuration file `clusterctl.yaml` that you want to use images from the private repo +by leveraging [image overrides](https://cluster-api.sigs.k8s.io/clusterctl/configuration.html#image-overrides). + +```yaml +images: + all: + repository: localhost:5001/registry.k8s.io/cluster-api + infrastructure-microvm: + repository: localhost:5001/ghcr.io/weaveworks-liquidmetal +``` +Then execute `make clusterctl-init` to init capi using your private registry. diff --git a/website/versioned_docs/version-0.24.0/installation/weave-gitops-enterprise/index.mdx b/website/versioned_docs/version-0.24.0/installation/weave-gitops-enterprise/index.mdx new file mode 100644 index 0000000000..fa26c52bae --- /dev/null +++ b/website/versioned_docs/version-0.24.0/installation/weave-gitops-enterprise/index.mdx @@ -0,0 +1,450 @@ +--- +title: Weave GitOps Enterprise +hide_title: true +pagination_next: getting-started/ui +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import TierLabel from "../../_components/TierLabel"; +import CurlCodeBlock from "../../_components/CurlCodeBlock"; +import oauthBitbucket from '/img/oauth-bitbucket.png'; +import oauthAzureDevOps from '/img/oauth-azure-devops.png'; +import oauthAzureDevOpsSuccess from '/img/oauth-azure-devops-success.png'; + +## Installing Weave GitOps Enterprise + +:::info +To purchase entitlement to Weave GitOps Enterprise Edition please contact [sales@weave.works](mailto:sales@weave.works) + +For more information about Weave GitOps Enterprise Edition, see the [Enterprise feature page](../../intro-ee.mdx). +::: + +Follow the instructions on this page to: + +import TOCInline from "@theme/TOCInline"; + + { + const trimStart = toc.slice(toc.findIndex((node) => node.id == 'installing-weave-gitops-enterprise')+1); + return trimStart.slice(0, trimStart.findIndex((node) => node.level == '2')); + })()} /> + +:::tip +There is no need to install Weave GitOps (OSS) before installing Weave GitOps Enterprise +::: + +### 1. Set up a Management Cluster with `flux` + +To get you started in this document we'll cover: + +- `kind` as our management cluster with the _CAPD_ provider +- **EKS** as our management cluster with the _CAPA_ provider + +However Weave GitOps Enterprise supports any combination of management cluster and CAPI provider. + + + + +##### 1.1 We start with creating a kind-config. + +```yaml title="kind-config.yaml" +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + extraMounts: + - hostPath: /var/run/docker.sock + containerPath: /var/run/docker.sock +``` + +The `extraMounts` are for the Docker CAPI provider (CAPD) to be able to talk to the host docker + +##### 1.2 Start your kind cluster using the configuration above and Kubernetes v1.23.6 + +```bash +kind create cluster --config kind-config.yaml --image=kindest/node:v1.23.6 +``` + + + + +##### 1.1 Prepare IAM for installation + +The Cluster API needs special permissions in AWS. Use the `clusterawsadm` command below to roll out a CloudStack to installs the permissions into your AWS account. While the CloudStack is bound to a region, the resulting permissions are globally scoped. You can use any AWS Region that you have access to. The `clusterawsadm` command takes an AWSIAMConfiguration file. 
We have provided a working example for you : + +```yaml title="eks-config.yaml" +apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1 +kind: AWSIAMConfiguration +spec: + bootstrapUser: + enable: true + eks: + iamRoleCreation: false # Set to true if you plan to use the EKSEnableIAM feature flag to enable automatic creation of IAM roles + defaultControlPlaneRole: + disable: false # Set to false to enable creation of the default control plane role + managedMachinePool: + disable: false # Set to false to enable creation of the default node pool role +``` + +Run `clusterawsadm` command to create the IAM group. + +```bash +$ clusterawsadm bootstrap iam create-cloudformation-stack --config eks-config.yaml --region $REGION +``` + +Create an IAM User. This user will be used as a kind of service account. Assign the newly created group to this user. The group name will be something like: `cluster-api-provider-aws-s-AWSIAMGroupBootstrapper-XXXX`. Create a secret for the newly created IAM user. + +##### 1.2 Create the cluster + +In testing we used the following values +`$INSTANCESIZE` : t3.large +`$NUMOFNODES` : 2 +`$MINNODES` : 2 +`$MAXNODES` : 6 + +```bash +eksctl create cluster -n "$CLUSTERNAME" -r "$REGION" --nodegroup-name workers -t $INSTANCESIZE --nodes $NUMOFNODES --nodes-min $MINNODES --nodes-max $MAXNODES --ssh-access --alb-ingress-access +``` + +##### 1.3 Add cluster to kubeconfig + +Once the cluster is created, add the cluster to your `kubeconfig` + +```bash +aws eks --region "$REGION" update-kubeconfig --name "$CLUSTERNAME" +``` + + + + +##### Install Flux onto your cluster with the `flux bootstrap` command. + + + + +```bash +flux bootstrap github \ + --owner= \ + --repository=fleet-infra \ + --branch=main \ + --path=./clusters/management \ + --personal +``` + + + + + +```bash +flux bootstrap gitlab \ + --owner= \ + --repository=fleet-infra \ + --branch=main \ + --path=./clusters/management \ + --personal +``` + + + + + + +* **owner** - The username (or organization) of the git repository +* **repository** - Git repository name +* **branch** - Git branch (default "main") +* **path** - path relative to the repository root, when specified the cluster sync will be scoped to this path +* **personal** - if set, the owner is assumed to be a repo user + +More information about `flux` and the `flux bootstrap` command can be found [here](https://fluxcd.io/docs/cmd/) + +:::note At this point a few things have occurred: +* Your Flux management cluster is now running +* A new git repo was created based on the parameters you set in the `flux bootstrap` command. Take a look at your repositories. +::: + +### 2. Install a CAPI provider + +:::note `clusterctl` versions + +The example templates provided in this guide have been tested with `clusterctl` version `1.1.3`. However you might need to use an older or newer version depending on the capi-providers you plan on using. + +Download a specific version of clusterctl from the [releases page](https://github.com/kubernetes-sigs/cluster-api/releases). +::: + +In order to be able to provision Kubernetes clusters, a CAPI provider needs to be installed. See [Cluster API Providers](../../cluster-management/cluster-api-providers.mdx) page for more details on providers. +Here we'll continue with our example instructions for CAPD and CAPA. 
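+
+If you need the specific `clusterctl` version mentioned above rather than the latest release, you can download the binary directly from the Cluster API release assets. A sketch for Linux on amd64, adjust the OS, architecture and version to match your workstation:
+
+```bash
+curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.3/clusterctl-linux-amd64 -o clusterctl
+chmod +x clusterctl
+sudo mv clusterctl /usr/local/bin/clusterctl
+clusterctl version
+```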
+ + + + +```bash +# Enable support for `ClusterResourceSet`s for automatically installing CNIs +export EXP_CLUSTER_RESOURCE_SET=true + +clusterctl init --infrastructure docker +``` + + + + +```bash +export EXP_EKS=true +export EXP_MACHINE_POOL=true +export CAPA_EKS_IAM=true +export EXP_CLUSTER_RESOURCE_SET=true + +clusterctl init --infrastructure aws +``` + + + + +### 3. Apply the entitlements secret + +Contact sales@weave.works for a valid entitlements secret. Then apply it to the cluster: + +```bash +kubectl apply -f entitlements.yaml +``` + +### 4. Configure access for writing to git from the UI + + + +GitHub requires no additional configuration for OAuth git access + + + +Create a GitLab OAuth Application that will request `api` permissions to create pull requests on the user's behalf. +Follow the [GitLab docs](https://docs.gitlab.com/ee/integration/oauth_provider.html). + +The application should have at least these scopes: + +- `api` +- `openid` +- `email` +- `profile` + +Add callback URLs to the application for each address the UI will be exposed on, e.g.: + +- `https://localhost:8000/oauth/gitlab` For port-forwarding and testing +- `https://git.example.com/oauth/gitlab` For production use + +Save your application and take note of the **Client ID** and **Client Secret** and save +them into the `git-provider-credentials` secret along with: + +- `GIT_HOST_TYPES` to tell WGE that the host is gitlab +- `GITLAB_HOSTNAME` where the OAuth app is hosted + +**Replace values** in this snippet and run: + +```bash +kubectl create secret generic git-provider-credentials --namespace=flux-system \ + --from-literal="GITLAB_CLIENT_ID=13457" \ + --from-literal="GITLAB_CLIENT_SECRET=24680" \ + --from-literal="GITLAB_HOSTNAME=git.example.com" \ + --from-literal="GIT_HOST_TYPES=git.example.com=gitlab" +``` + + + + +Create a new [incoming application link](https://confluence.atlassian.com/bitbucketserver/configure-an-incoming-link-1108483657.html) from +the BitBucket administration dashboard. You will be asked to enter a unique name and the redirect URL for the external application. The redirect URL +should be set to `/oauth/bitbucketserver`. You will also need to select permissions for the application. The minimum set of +permissions needed for WGE to create pull requests on behalf of users is `Repositories - Write`. An example of configuring these settings is shown below. + +
+ + + +
Configuring a new incoming application link
+
+ + +Save your application and take note of the **Client ID** and **Client Secret** and save +them into the `git-provider-credentials` secret along with: + +- `GIT_HOST_TYPES` to tell WGE that the host is bitbucket-server +- `BITBUCKET_SERVER_HOSTNAME` where the OAuth app is hosted + +**Replace values** in this snippet and run: + +```bash +kubectl create secret generic git-provider-credentials --namespace=flux-system \ + --from-literal="BITBUCKET_SERVER_CLIENT_ID=13457" \ + --from-literal="BITBUCKET_SERVER_CLIENT_SECRET=24680" \ + --from-literal="BITBUCKET_SERVER_HOSTNAME=git.example.com" \ + --from-literal="GIT_HOST_TYPES=git.example.com=bitbucket-server" +``` + +If the secret is already present, use the following command to update it using your default editor: + +```bash +kubectl edit secret generic git-provider-credentials --namespace=flux-system +``` + +:::info + +If BitBucket Server is running on the default port (7990), make sure you include the port number in the values of the secret, for example: `GIT_HOST_TYPES=git.example.com:7990=bitbucket-server` + +::: + +
+ + +Navigate to https://app.vsaex.visualstudio.com/app/register and register a new application, as explained in the [docs](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/oauth?view=azure-devops#1-register-your-app). You will be asked to set the authorization callback URL as well as select which scopes to grant. The callback URL should be set to `/oauth/azuredevops`. You will also need to select the `Code (read and write)` scope from the list as this is needed for WGE to be able to create pull requests on behalf of users. An example of configuring these settings is shown below. + +
+ +
Creating a new application
+
+ +After creating your application, you will be presented with the application settings. Take note of the `App ID` and `Client Secret` values as you will use them to configure WGE. + +
+ +
Application settings
+
+ +In your cluster, create a secret named `git-provider-credentials` that contains the `App ID` and `Client Secret` values from the newly created application. + +**Replace values** in this snippet and run: + +```bash +kubectl create secret generic git-provider-credentials --namespace=flux-system \ + --from-literal="AZURE_DEVOPS_CLIENT_ID=" \ + --from-literal="AZURE_DEVOPS_CLIENT_SECRET=" +``` + +WGE is now configured to ask users for authorization the next time a pull request needs to be created as part of using a template. Note that each user can view and manage which applications they have authorized by navigating to https://app.vsaex.visualstudio.com/me. + +
+
+ + +### 5. Configure and commit + +We deploy WGE via a Helm chart. We'll save and adapt the below template, before committing it to git to a flux-reconciled path. + +Clone the newly created repo locally as we're gonna add some things! + +``` +git clone git@:/fleet-infra +cd fleet-infra +``` + +Download the helm-release to `clusters/management/weave-gitops-enterprise.yaml`. + +import ExampleWGE from "../../assets/example-enterprise-helm.yaml"; +import ExampleWGEContent from "!!raw-loader!../../assets/example-enterprise-helm.yaml"; + +
Expand to see file contents + + + +
+ +Once you have copied the above file, open and adjust the following configuration +options: + +#### `values.config.capi.repositoryURL` +Ensure this has been set to your repository URL. + +#### `values.config.capi.repositoryPath` +By default, WGE will create new clusters in the `clusters/management/clusters` path. +This can be configured with `values.config.capi.repositoryPath`. +For example you might what to change it to `clusters/my-cluster/cluster` if you configured flux to reconcile `./clusters/my-cluster` instead. + +#### `values.config.capi.repositoryClustersPath` +The other important path to configure is where applications and workloads that will be run on the new cluster will be stored. +By default this is `./clusters`. When a new cluster is specified any profiles that have been selected will be written to `./clusters/{.namespace}/{.clusterName}/profiles.yaml`. +When the new cluster is bootstrapped, flux will be sync the `./clusters/{.namespace}/{.clusterName}` path. + +#### (Optional) Install policy agent + +[Policy agent](../../policy/intro.mdx) comes packaged with the WGE chart. To install it you need to set the following values: + +- `values.policy-agent.enabled`: set to true to install the agent with WGE +- `values.policy-agent.config.accountId`: organization name, used as identifier +- `values.policy-agent.config.clusterId`: unique identifier for the cluster + +Commit and push all the files + +```bash +git add clusters/management/weave-gitops-enterprise.yaml +git commit -m "Deploy Weave GitOps Enterprise" +git push +``` + +Flux will reconcile the helm-release and WGE will be deployed into the cluster. You can check the `flux-system` namespace to verify all pods are running. + +### 6. Configure password + +In order to login to the WGE UI, you need to generate a bcrypt hash for your chosen password and store it as a secret in the Kubernetes cluster. + +There are several different ways to generate a bcrypt hash, this guide uses `gitops get bcrypt-hash` from our CLI, which can be installed by following +the instructions [here](#gitops-cli). + +```bash +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash +$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q +``` + +Use the hashed output to create a Kubernetes username/password secret. + +```bash +kubectl create secret generic cluster-user-auth \ + --namespace flux-system \ + --from-literal=username=wego-admin \ + --from-literal=password='$2a$.......' +``` + +### 7. Install the CLI +Install the Weave GitOps Enterprise CLI tool. +You can use brew or curl + +```console +brew install weaveworks/tap/gitops-ee +``` + +```bash +curl --silent --location "https://artifacts.wge.dev.weave.works/releases/bin/0.22.0/gitops-$(uname | tr '[:upper:]' '[:lower:]')-$(uname -m).tar.gz" | tar xz -C /tmp +sudo mv /tmp/gitops /usr/local/bin +gitops version +``` + +## Next steps + +In our following [Get Started document](../../getting-started/ui.mdx), we will walk you through logging into the GitOps Dashboard and deploying an application. + +Then you can head over to either: + +- [Cluster Management - Getting started](../../cluster-management/getting-started.mdx) to create your first CAPI Cluster with `kind`/CAPD +- [Deploying CAPA with EKS](../../guides/deploying-capa.mdx) to create your first CAPI Cluster with EKS/CAPA. + +### (Optional) Install the TF-Controller + +The [TF-Controller](https://weaveworks.github.io/tf-controller/) is a controller for Flux to reconcile Terraform resources in a GitOps way. 
+ +With Flux and the TF-Controller, Weave GitOps Enterprise makes it easy to add Terraform templates to clusters and continuously reconcile any changes made to the Terraform source manifest. + +Check out our guide on how to [use Terraform templates](../../guides/using-terraform-templates.mdx), and why not try your hands at using it with the RDS example! + +Install the TF-Controller to a cluster using Helm: + +```console +# Add tf-controller helm repository +helm repo add tf-controller https://weaveworks.github.io/tf-controller/ + +# Install tf-controller +helm upgrade -i tf-controller tf-controller/tf-controller \ + --namespace flux-system +``` + +Consult the TF-Controller [Installation](https://weaveworks.github.io/tf-controller/getting_started/) documentation for more details on which parameters are configurable and how to install a specific version. diff --git a/website/versioned_docs/version-0.24.0/installation/weave-gitops.mdx b/website/versioned_docs/version-0.24.0/installation/weave-gitops.mdx new file mode 100644 index 0000000000..f0b24164c4 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/installation/weave-gitops.mdx @@ -0,0 +1,204 @@ +--- +title: Weave GitOps OSS +hide_title: true +pagination_next: getting-started/ui +--- + +## Installing Weave GitOps on your Cluster + +:::tip +These are the instructions to install the OSS tier Weave GitOps. To install Enterprise +Weave GitOps, follow the instructions [here][ee-install]. +::: + +### Before you begin + +To follow along, you will need the following: +- A Kubernetes cluster - such as [Kind][kind] +- A [GitHub][github] account and [personal access token with repo permissions][pat] +- [kubectl][kubectl] + +### Install Flux + +Weave GitOps is an extension to Flux and therefore requires that Flux 0.32 or +later has already been installed on your Kubernetes cluster. Full documentation +is available [here][fl-install]. + +This version of Weave GitOps is tested against the following Flux releases: +* 0.36 +* 0.35 +* 0.34 +* 0.33 +* 0.32 + +In this section we are going to do the following: + +- Create a git repository `fleet-infra` in your Git account +- Add Flux component manifests to the repository +- Deploy Flux Components to your Kubernetes Cluster +- Configure Flux components to track the path `./clusters/my-cluster/` in the repository + +Let's get into it... :sparkles: + +1. Install the flux CLI + + ``` + brew install fluxcd/tap/flux + ``` + + For other installation methods, see the relevant [Flux documentation][fl-install]. + +1. Export your credentials (ensure your PAT has `repo` scope) + + ``` + export GITHUB_TOKEN= + export GITHUB_USER= + ``` + +1. Check your Kubernetes cluster + + ``` + flux check --pre + ``` + + The output is similar to: + ``` + ► checking prerequisites + ✔ kubernetes 1.22.2 >=1.20.6 + ✔ prerequisites checks passed + ``` + +1. Install Flux onto your cluster with the `flux bootstrap` command. The command + below assumes the Git provider to be `github`, alter this if you would rather use + `gitlab`. + + ``` + flux bootstrap github \ + --owner=$GITHUB_USER \ + --repository=fleet-infra \ + --branch=main \ + --path=./clusters/my-cluster \ + --personal + ``` + + :::info + Full installation documentation including how to work with other Git providers is available [here][fl-install]. + ::: + +1. If you navigate to your Git provider, you will see that the `fleet-infra` + repository has been created. 
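+
+Before moving on, you can optionally confirm that the Flux controllers are healthy and that the bootstrap Kustomization has reconciled:
+
+```
+flux check
+flux get kustomizations --all-namespaces
+```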
+ +### Install the `gitops` CLI + +Weave GitOps includes a command-line interface to help users create and manage resources. + +:::note Installation options +The `gitops` CLI is currently supported on Mac (x86 and Arm), and Linux - including Windows Subsystem for Linux (WSL). + +Windows support is a [planned enhancement](https://github.com/weaveworks/weave-gitops/issues/663). +::: + +There are multiple ways to install the `gitops` CLI: + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + + + + +```bash +curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.24.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp +sudo mv /tmp/gitops /usr/local/bin +gitops version +``` + + + + +```console +brew tap weaveworks/tap +brew install weaveworks/tap/gitops +``` + + + + +### Deploy Weave GitOps + +In this section we will do the following: + +- Use the GitOps CLI tool to generate [`HelmRelease`][helm-rel] and [`HelmRepository`][helm-repo] objects. +- Create some login credentials to access the dashboard. This is a simple but **insecure** + method of protecting and accessing your GitOps dashboard. +- Commit the generated yamls to our `fleet-infra` repo. +- Observe as they are synced to the cluster. + +1. Clone your git repository where Flux has been bootstrapped. + + ``` + git clone https://github.com/$GITHUB_USER/fleet-infra + cd fleet-infra + ``` + +1. Run the following command which will create a `HelmRepository` and `HelmRelease` to deploy Weave GitOps + + ``` + PASSWORD="" + gitops create dashboard ww-gitops \ + --password=$PASSWORD \ + --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml + ``` + + :::warning + This command stores a hash of a password. While this is relatively safe for demo + and testing purposes it is recommended that you look at more secure methods of storing secrets + (such as [Flux's SOPS integration][sops]) for production systems. + + More guidance and alternative login methods can be found in [Securing access to the dashboard][dash-access]. + ::: + +1. Commit and push the `weave-gitops-dashboard.yaml` to the `fleet-infra` repository + + ``` + git add -A && git commit -m "Add Weave GitOps Dashboard" + git push + ``` + +1. Validate that Weave GitOps and Flux are installed. _Note: this wont be instantaneous, + give the Flux controllers a couple of minutes to pull the latest commit._ + + ``` + kubectl get pods -n flux-system + ``` + + You should see something similar to: + + ``` + NAME READY STATUS RESTARTS AGE + helm-controller-5bfd65cd5f-gj5sz 1/1 Running 0 10m + kustomize-controller-6f44c8d499-s425n 1/1 Running 0 10m + notification-controller-844df5f694-2pfcs 1/1 Running 0 10m + source-controller-6b6c7bc4bb-ng96p 1/1 Running 0 10m + ww-gitops-weave-gitops-86b645c9c6-k9ftg 1/1 Running 0 5m + ``` + + :::tip + There are many other things you can configure in the Weave GitOps Helm Chart. + The full Chart reference can be found [here](../references/helm-reference.md). + ::: + +## Next steps + +In the following [Get Started document](../getting-started/ui.mdx), we will walk you +through logging into the GitOps Dashboard and deploying an application. 
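+
+Before heading there, you can already reach the dashboard locally with a simple port-forward. Assuming the `ww-gitops` release name used above, the Service is typically named `ww-gitops-weave-gitops` and listens on port 9001:
+
+```
+kubectl port-forward svc/ww-gitops-weave-gitops -n flux-system 9001:9001
+```
+
+Then open `http://localhost:9001` (or `https://localhost:9001` if TLS is enabled for your installation) and sign in with the credentials you configured above.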
+ +[ee-install]: ../weave-gitops-enterprise +[kind]: https://kind.sigs.k8s.io/docs/user/quick-start/ +[github]: https://github.com +[pat]: https://help.github.com/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line +[kubectl]: https://kubernetes.io/docs/tasks/tools/#kubectl +[fl-install]: https://fluxcd.io/docs/installation/ +[dash-access]: ../configuration/securing-access-to-the-dashboard.mdx +[sops]: https://fluxcd.io/docs/guides/mozilla-sops/ +[helm-repo]: https://fluxcd.io/flux/components/source/helmrepositories/#writing-a-helmrepository-spec +[helm-rel]: https://fluxcd.io/flux/components/helm/helmreleases/ diff --git a/website/versioned_docs/version-0.24.0/intro-ee.mdx b/website/versioned_docs/version-0.24.0/intro-ee.mdx new file mode 100644 index 0000000000..0400cb07b3 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/intro-ee.mdx @@ -0,0 +1,69 @@ +--- +title: Enterprise Edition +hide_title: true +--- +import TierLabel from "./_components/TierLabel"; +import Link from "@docusaurus/Link"; + +# Weave GitOps Enterprise Edition + +:::tip Ready for more GitOps? +To purchase entitlement to Weave GitOps Enterprise Edition please contact [sales@weave.works](mailto:sales@weave.works) +::: + +WGEE provides ops teams with an easy way to assess the +health of multiple clusters in a single place. It shows cluster information such as +Kubernetes version and number of nodes and provides details about the GitOps operations +on those clusters, such as Git repositories and recent commits. Additionally, it +aggregates Prometheus alerts to assist with troubleshooting. + +If you have already purchased your entitlement, head to the [installation page](./installation/weave-gitops-enterprise/index.mdx). + +## Feature breakdown + +In addition to the features in the OSS edition, Weave GitOps Enterprise Edition +offers the following capabilities, taking your delivery from simple CD to IDP: + +### :boat: Cluster Fleet Management +Deploy the same application into many different clusters even across +cloud and hybrid environments, allowing change deployments across the fleet +via Git and Cluster API. + +### :closed_lock_with_key: Trusted Application Delivery +Add policy as code to GitOps pipelines and enforce security and compliance, +application resilience and coding standards from source to production. +Validate policy conformance at every step in the software delivery pipeline: +commit, build, deploy and run time. + +### :black_right_pointing_double_triangle_with_vertical_bar: Progressive Delivery +Deploy into production environments safely using canary, blue/green and A/B +strategies. Simple single file configuration defines success / rollback SLO +using observability metrics from Prometheus, Datadog, New Relic and others. + +### :infinity: CD Pipelines +Rollout new software from development to production. +Environment rollouts that work with your existing CI system. + +### :factory_worker::female-factory-worker: Team Workspaces +Allow DevOps teams to work seamlessly together with multi-tenancy. +Total RBAC control and policy enforcement with integration to enterprise IAM. + +### :point_up_2: Self-Service Templates and Profiles +Component profiles enable teams to deploy standard services quickly, +consistently and reliably. Teams can curate the profiles that are available +within their estate ensuring there is consistency everywhere. Using GitOps +it's easy to guarantee the latest, secure versions of any component are +deployed in all production systems. 
+ +### :sparkling_heart: Health Status and Compliance Dashboards +Gain a single view of the health and state of the cluster and its workloads. +Monitor deployments and alert on policy violations across apps and clusters. + +### :compass: Kubernetes Anywhere +Reduce complexity with GitOps and install across all major target environments +including support for on-premise, edge, hybrid, and multi-cloud Kubernetes clusters + +### :bell: Critical 24/7 Support +Your business and workloads operate around the clock and so do we. +Our team operates 24/7 so whenever there’s a problem our experts are +there to help. Operate with confidence - we’ve got your back! diff --git a/website/versioned_docs/version-0.24.0/intro.mdx b/website/versioned_docs/version-0.24.0/intro.mdx new file mode 100644 index 0000000000..8f67976489 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/intro.mdx @@ -0,0 +1,98 @@ +--- +title: Introduction +hide_title: true +--- + +# Weave GitOps + +Weave GitOps is a powerful extension to [Flux][flux], a leading GitOps engine and +CNCF project. Weave GitOps provides insights into your application deployments, +and makes continuous delivery with GitOps easier to adopt and scale across your teams. + +Its web UI surfaces key information to help application operators easily discover +and resolve issues. The intuitive interface provides a guided experience to build +understanding and simplify getting started for new users; they can easily discover +the relationship between Flux objects and navigate to deeper levels of information as required. + +Weave GitOps is an open source project sponsored by [Weaveworks](https://weave.works) - the GitOps company, +and original creators of [Flux][flux]. + +## Getting Started + +To start your own journey with Weave GitOps, please see [Installation](./installation/index.mdx) and [Getting Started](./getting-started/intro.mdx). + +Here is a quick demo of what you can look forward to: + +import ReactPlayer from "react-player/lazy"; + +
+ +
+ +## Features + +OSS Weave GitOps has a number of awesome features to take your team beyond a simple CI/CD system. + +- :female-construction-worker: Drill down into more detailed information on any given Flux resource. +- :mag: Uncover relationships between resources and quickly navigate between them. +- :thinking_face: Understand how workloads are reconciled through a directional graph. +- :goggles: View Kubernetes events relating to a given object to understand issues and changes. +- :no_pedestrians: Secure access to the dashboard through the ability to integrate with an OIDC provider (such as Dex). +- :link: Fully integrates with [Flux](https://fluxcd.io/docs/) as the GitOps engine to provide: + - :infinity: Continuous Delivery through GitOps for apps and infrastructure + - :jigsaw: Support for GitHub, GitLab, Bitbucket, and even use s3-compatible buckets as a source; all major container registries; and all CI workflow providers. + - :key: A secure, pull-based mechanism, operating with least amount of privileges, and adhering to Kubernetes security policies. + - :electric_plug: Compatible with any conformant [Kubernetes version](https://fluxcd.io/docs/installation/#prerequisites) and common ecosystem technologies such as Helm, Kustomize, RBAC, Prometheus, OPA, Kyverno, etc. + - :office: Multitenancy, multiple git repositories, multiple clusters + - :exclamation: Alerts and notifications + +## Weave GitOps Enterprise + +import Link from '@docusaurus/Link'; + +Need even more GitOps? **Weave GitOps Enterprise Edition (WGEE)** has all the features +listed above, plus many more. + +WGEE provides ops teams with an easy way to assess the +health of multiple clusters in a single place. It shows cluster information such as +Kubernetes version and number of nodes and provides details about the GitOps operations +on those clusters, such as Git repositories and recent commits. Additionally, it +aggregates Prometheus alerts to assist with troubleshooting. + +- :boat: **Cluster Fleet Management** +- :closed_lock_with_key: **Trusted Application Delivery** +- :black_right_pointing_double_triangle_with_vertical_bar: **Progressive Delivery** +- :point_up_2: **Self-Service Templates and Profiles** +- :sparkling_heart: **Health Status and Compliance Dashboards** +- :factory_worker::female-factory-worker: **Team Workspaces** +- :compass: **Kubernetes Anywhere** +- :bell: **Critical 24/7 Support** +- :infinity: **CD Pipelines** + +:::tip Want to learn more about how Weave GitOps Enterprise Edition can help your team? +Get in touch with sales@weave.works to discuss your needs. +::: + +## Why adopt GitOps? + +> "GitOps is the best thing since configuration as code. Git changed how we collaborate, but declarative configuration is the key to dealing with infrastructure at scale, and sets the stage for the next generation of management tools" + +- Kelsey Hightower, Staff Developer Advocate, Google.

+ +Adopting GitOps can bring a number of key benefits: + +- Faster and more frequent deployments +- Easy recovery from failures +- Improved security and auditability + +To learn more about GitOps, check out these resources: + +- [GitOps for absolute beginners](https://go.weave.works/WebContent-EB-GitOps-for-Beginners.html) - eBook from Weaveworks +- [Guide to GitOps](https://www.weave.works/technologies/gitops/) - from Weaveworks +- [OpenGitOps](https://opengitops.dev/) - CNCF Sandbox project aiming to define a vendor-neutral, principle-led meaning of GitOps. +- [gitops.tech](https://www.gitops.tech/) - supported by Innoq + +[flux]: https://fluxcd.io diff --git a/website/versioned_docs/version-0.24.0/pipelines/authorization.mdx b/website/versioned_docs/version-0.24.0/pipelines/authorization.mdx new file mode 100644 index 0000000000..d4e0aa206a --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/authorization.mdx @@ -0,0 +1,49 @@ +--- +title: Authorization +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Authorization + + + +This section provides a recommended way to configure RBAC in the context of pipelines. It is oriented to the journey +that you expect your users to have. + +## View pipelines + +In order to view pipelines, users would need to have read access to the `pipeline` resource and the underlying `application` resources. + +An example of configuration to achieve this purpose could be seen below with `pipeline-reader` role and `search-pipeline-reader` +role-binding to allow a group `search-developer` to access pipeline resources within `search` namespace. + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pipeline-reader +rules: + - apiGroups: [ "pipelines.weave.works" ] + resources: [ "pipelines" ] + verbs: [ "get", "list", "watch"] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: search-pipeline-reader + namespace: search +subjects: + - kind: Group + name: search-developer + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: pipeline-reader + apiGroup: rbac.authorization.k8s.io +``` diff --git a/website/versioned_docs/version-0.24.0/pipelines/getting-started.mdx b/website/versioned_docs/version-0.24.0/pipelines/getting-started.mdx new file mode 100644 index 0000000000..4fa61c1ff6 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/getting-started.mdx @@ -0,0 +1,103 @@ +--- +title: Getting started +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Getting started with Pipelines + + + +## Prerequisites +Before using Pipelines, please ensure that: +- You have Weave GitOps Enterprise installed on a cluster. +- You have configured Weave GitOps Enterprise [RBAC for Pipelines](../authorization). +- The Pipelines feature flag `enablePipelines` has been enabled. This flag is part of the Weave GitOps Enterprise Helm chart values and is enabled by default. +- Any leaf clusters that are running workloads that you need to visualise using Pipelines, have been added to Weave GitOps Enterprise. 
+- You have [exposed the promotion webhook ](../promoting-applications/#expose-the-promotion-webhook) on the management cluster and leaf clusters can reach that webhook endpoint over the network. + +## Define a pipeline + +A pipeline allows you to define the route your application is taking in order to make it to production. +There are three main concepts playing in a pipeline: +- the `application` to deliver +- the `environments` that your app will go through in its way to production (general) +- the `deployment targets` or the clusters that each environment has + +You can define a delivery pipeline using a `Pipeline` custom resource. +An example of how it looks for an application `podinfo` is shown below. + +
Expand to view + +```yaml +--- +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: podinfo-02 + namespace: flux-system +spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: podinfo + environments: + - name: dev + targets: + - namespace: podinfo-02-dev + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: test + targets: + - namespace: podinfo-02-qa + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - namespace: podinfo-02-perf + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: prod + targets: + - namespace: podinfo-02-prod + clusterRef: + kind: GitopsCluster + name: prod + namespace: flux-system +``` + +
+ +In the example above, the `podinfo` application is delivered to a traditional pipeline composed of `dev`, `test` and `prod` environments. +An environment is used to describe the different stages of a pipeline and is composed of one or more deployment targets. A deployment +target is a combination of a namespace and a [`GitopsCluster` reference](../../cluster-management/managing-existing-clusters/) and is +used to specify where the application is running in our fleet. In this case, the `test` environment is composed of two deployment targets, +`qa` and `perf`, to indicate that although both targets are part of the same stage (testing), they can evolve separately and may run +different versions of the application. Note, that there are two clusters being used for the environments, `dev` and `prod`, both of which +are defined in the `flux-system` namespace. + +For more details about the spec of a pipeline [see here](spec/v1alpha1/pipeline.mdx). + +## View the list of pipelines + +Once flux has reconciled your pipeline you can navigate to the pipelines view to see it. + +![view pipelines](img/view-pipelines.png) + +Pipeline list view show the list of pipelines you have access to. For each pipeline, a simplified view of the pipeline +is shown with the application `Type` and `Environments` it goes through. + +## View the details of a pipeline + +Once you have selected a pipeline from the list, you will navigate to its details view. +In pipeline details view you could view the current status of your application by environment and deployment +target. + +![view pipeline details](img/view-pipeline-details.png) + diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/bot-account.png b/website/versioned_docs/version-0.24.0/pipelines/img/bot-account.png new file mode 100644 index 0000000000..f621f2f022 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/bot-account.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/create-token-with-expiration.png b/website/versioned_docs/version-0.24.0/pipelines/img/create-token-with-expiration.png new file mode 100644 index 0000000000..3479d205a6 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/create-token-with-expiration.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/fine-grained-token.png b/website/versioned_docs/version-0.24.0/pipelines/img/fine-grained-token.png new file mode 100644 index 0000000000..522fa0bd71 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/fine-grained-token.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/manage-fine-grained.png b/website/versioned_docs/version-0.24.0/pipelines/img/manage-fine-grained.png new file mode 100644 index 0000000000..07f0020ba9 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/manage-fine-grained.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/manual-promotion-ui.png b/website/versioned_docs/version-0.24.0/pipelines/img/manual-promotion-ui.png new file mode 100644 index 0000000000..6eb76113b0 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/manual-promotion-ui.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-jenkins/post-content-param.png b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-jenkins/post-content-param.png new file mode 100644 index 0000000000..e9086c79e9 Binary files /dev/null and 
b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-jenkins/post-content-param.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-jenkins/token.png b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-jenkins/token.png new file mode 100644 index 0000000000..373d0d10c8 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-jenkins/token.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-table-create.png b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-table-create.png new file mode 100644 index 0000000000..d4e4061b5a Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-table-create.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-templates.png b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-templates.png new file mode 100644 index 0000000000..52dac5871e Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/pipelines-templates.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/promotion-pr.png b/website/versioned_docs/version-0.24.0/pipelines/img/promotion-pr.png new file mode 100644 index 0000000000..74f7733618 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/promotion-pr.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/view-pipeline-details.png b/website/versioned_docs/version-0.24.0/pipelines/img/view-pipeline-details.png new file mode 100644 index 0000000000..a64c3d6e2c Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/view-pipeline-details.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/img/view-pipelines.png b/website/versioned_docs/version-0.24.0/pipelines/img/view-pipelines.png new file mode 100644 index 0000000000..c90eed0708 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/pipelines/img/view-pipelines.png differ diff --git a/website/versioned_docs/version-0.24.0/pipelines/intro.mdx b/website/versioned_docs/version-0.24.0/pipelines/intro.mdx new file mode 100644 index 0000000000..862e53f414 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/intro.mdx @@ -0,0 +1,33 @@ +--- +title: Introduction +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Pipelines + + + +As [wikipedia defines](https://en.wikipedia.org/wiki/Continuous_delivery), Continuous delivery is + +>a software engineering approach in which teams produce software in short cycles, +>ensuring that the software can be reliably released at any time and, when releasing the software, without doing so manually. +>It aims at building, testing, and releasing software with greater speed and frequency. + +>Continuous delivery is enabled through the deployment pipeline. +>The purpose of the deployment pipeline has three components: visibility, feedback, and continually deploy. + +Weave GitOps Enterprise Pipelines allows you to define your deployment pipelines to enable continuous delivery for +your gitops applications. + +As part of Weave GitOps Enterprise, you can + +- [Define a delivery pipeline](../getting-started/#define-a-pipeline) for an application that is packaged as a Helm chart. +- [Visualise a pipeline](../getting-started/#view-pipeline-list) and check the current status and versions of your deployments. 
+- [Promote applications](../promoting-applications) either automatically or manually via the Weave GitOps Enterprise dashboard. +- Notify other CI tools such as [Tekton](../pipelines-with-tekton) and [Jenkins](../pipelines-with-jenkins) of an application promotion. + + +Now that you know what delivery pipelines can do for you, follow the [guide to get started](../getting-started). \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/pipelines/pipeline-templates.mdx b/website/versioned_docs/version-0.24.0/pipelines/pipeline-templates.mdx new file mode 100644 index 0000000000..63803d97e6 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/pipeline-templates.mdx @@ -0,0 +1,300 @@ +--- +title: Using GitOpsTemplates for Pipelines +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Using GitOpsTemplates for Pipelines + +To create new Pipelines and their required resources from within Weave GitOps +Enterprise, you can leverage [GitOpsTemplates](../../gitops-templates/intro) to help platform teams scale for developer self-service. + +This document will provide example configuration which could be adapted for use within your own organization, based on your [tenancy model](https://kubernetes.io/blog/2021/04/15/three-tenancy-models-for-kubernetes/) of choice. + +We will cover the creation of: +- Pipelines +- Alerts +- Providers + +Secrets, required for authentication and authorization between leaf and management clusters as well as to Git, are out of scope for this document and would need to be handled by your secret management solution of choice. + +For advice on Secrets Management, you can refer to the Flux guide [here](https://fluxcd.io/flux/security/secrets-management/) or contact [Weaveworks](mailto:sales@weave.works) for assistance. + +Templates can include a single resource or multiple resources depending on your use case, for example - you may want to only create the Pipeline custom resource to associate existing HelmReleases, or you could create the HelmReleases, notification controller resources, and Pipeline all in a single template. They are highly customizable to suit your teams' needs. + +## Adding new resources from within the Weave GitOps Enterprise dashboard +GitOpsTemplates are custom resources installed onto the management cluster where Weave GitOps Enterprise resides. To add a new Pipeline, click `Create a Pipeline` from within the Pipeline view, which will take you to a pre-filtered list of templates with the label: `weave.works/template-type: pipeline`. + +![Create Pipeline button in Pipeline view](img/pipelines-table-create.png) + + The `Templates` view (shown below) lists all templates for which a given user has the appropriate permission to view. You can install GitOpsTemplates into different namespaces, and apply standard kubernetes RBAC to limit which teams can utilize which templates. You could additionally configure [Policy](../../policy/intro) to enforce permitted values within a template. + +![Templates view showing Pipeline templates](img/pipelines-templates.png) + +## Example GitOpsTemplates + +This section provides examples to help you build your own templates for Pipelines. + +### Pipeline - Visualization only + +:::tip Included Sample +This template is shipped by default with Weave GitOps Enterprise to help you get started with Pipelines. 
+::: + +For flexibility, this allows the user of the template to specify the names of the Clusters where the application is deployed, and to vary the namespace per cluster. This means it would even work in a tenancy model where environments co-exist on the same cluster and use namespaces for isolation. + +
Expand to view example template + +```yaml +--- +apiVersion: templates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: pipeline-sample + namespace: default # Namespace where the GitOpsTemplate is installed, consider that a team will need READ access to this namespace and the custom resource + labels: + weave.works/template-type: pipeline +spec: + description: Sample Pipeline showing visualization of two helm releases across two environments. + params: + - name: RESOURCE_NAME # This is a required parameter name to enable Weave GitOps to write to your Git Repository + description: Name of the Pipeline + - name: RESOURCE_NAMESPACE + description: Namespace for the Pipeline on the management cluster + default: flux-system # default values make it easier for users to fill in a template + - name: FIRST_CLUSTER_NAME + description: Name of GitopsCluster object for the first environment + - name: FIRST_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: FIRST_APPLICATION_NAME + description: Name of the HelmRelease for your application in the first environment + - name: FIRST_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + - name: SECOND_CLUSTER_NAME + description: Name of GitopsCluster object for the second environment + - name: SECOND_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: SECOND_APPLICATION_NAME + description: Name of the HelmRelease for your application in the second environment + - name: SECOND_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + resourcetemplates: + - content: + - apiVersion: pipelines.weave.works/v1alpha1 + kind: Pipeline + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: ${APPLICATION_NAME} + environments: + - name: First-Environment + targets: + - namespace: ${FIRST_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${FIRST_CLUSTER_NAME} + namespace: ${FIRST_CLUSTER_NAMESPACE} + - name: Second-Environment + targets: + - namespace: ${SECOND_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${SECOND_CLUSTER_NAME} + namespace: ${SECOND_CLUSTER_NAMESPACE} +``` + +
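+
+To make the parameter mapping concrete, this is roughly what the rendered `Pipeline` looks like once a user fills in the template, using hypothetical values (`search-app`, `dev`, `prod` and so on) for the parameters:
+
+```yaml
+apiVersion: pipelines.weave.works/v1alpha1
+kind: Pipeline
+metadata:
+  name: search-app          # RESOURCE_NAME
+  namespace: flux-system    # RESOURCE_NAMESPACE
+spec:
+  appRef:
+    apiVersion: helm.toolkit.fluxcd.io/v2beta1
+    kind: HelmRelease
+    name: search-app        # name of the application HelmRelease
+  environments:
+    - name: First-Environment
+      targets:
+        - namespace: search-dev        # FIRST_APPLICATION_NAMESPACE
+          clusterRef:
+            kind: GitopsCluster
+            name: dev                  # FIRST_CLUSTER_NAME
+            namespace: default         # FIRST_CLUSTER_NAMESPACE
+    - name: Second-Environment
+      targets:
+        - namespace: search-prod       # SECOND_APPLICATION_NAMESPACE
+          clusterRef:
+            kind: GitopsCluster
+            name: prod                 # SECOND_CLUSTER_NAME
+            namespace: default         # SECOND_CLUSTER_NAMESPACE
+```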
+ +### Pipeline - Multi-cluster promotion + +This example extends the above to add a promotion strategy, in this case it will raise a pull request to update the application version in subsequent environments. + +
Expand to view example template + +```yaml +--- +apiVersion: templates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: pipeline-sample + namespace: default + labels: + weave.works/template-type: pipeline +spec: + description: Sample Pipeline showing visualization of two helm releases across two environments. + params: + - name: RESOURCE_NAME + description: Name of the Pipeline + - name: RESOURCE_NAMESPACE + description: Namespace for the Pipeline on the management cluster + default: flux-system + - name: FIRST_CLUSTER_NAME + description: Name of GitopsCluster object for the first environment + - name: FIRST_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: FIRST_APPLICATION_NAME + description: Name of the HelmRelease for your application in the first environment + - name: FIRST_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + - name: SECOND_CLUSTER_NAME + description: Name of GitopsCluster object for the second environment + - name: SECOND_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: SECOND_APPLICATION_NAME + description: Name of the HelmRelease for your application in the second environment + - name: SECOND_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + - name: APPLICATION_REPO_URL + description: URL for the git repository containing the HelmRelease objects + - name: APPLICATION_REPO_BRANCH + description: Branch to update with new version + - name: GIT_CREDENTIALS_SECRET + description: Name of the secret in RESOURCE_NAMESPACE containing credentials to create pull requests + resourcetemplates: + - content: + - apiVersion: pipelines.weave.works/v1alpha1 + kind: Pipeline + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: ${APPLICATION_NAME} + environments: + - name: First-Environment + targets: + - namespace: ${FIRST_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${FIRST_CLUSTER_NAME} + namespace: ${FIRST_CLUSTER_NAMESPACE} + - name: Second-Environment + targets: + - namespace: ${SECOND_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${SECOND_CLUSTER_NAME} + namespace: ${SECOND_CLUSTER_NAMESPACE} + promotion: + pull-request: + url: ${APPLICATION_REPO_URL} + baseBranch: ${APPLICATION_REPO_BRANCH} + secretRef: + name: ${GIT_CREDENTIALS_SECRET} +``` + +
+ +#### Git credentials +For guidance on configuring credentials - see instructions in the [Promoting Applications](../promoting-applications#create-credentials-secret) documentation. + +#### Promotion marker to be added to HelmRelease in `Second-Environment` +A comment would need to be added to the HelmRelease or Kustomization patch where the `spec.chart.spec.version` is defined. + +For example, if the values used in the above template were as follows: + +```yaml +RESOURCE_NAME=my-app +RESOURCE_NAMESPACE=pipeline-01 +``` + +Then the marker would be: + +```yaml +# {"$promotion": "pipeline-01:my-app:Second-Environment"} +``` + +More guidance on adding markers can be found [here](../promoting-applications#add-markers-to-app-manifests). + +### Alerts and Providers +This example shows how you can configure multiple resources in a single template, and simplify creation through common naming strategies. The notification controller is used to communicate update events from the leaf clusters where Applications are deployed, to the management cluster where the Pipeline Controller resides and orchestrates. + +For the `Alert`, this template is filtering events to detect when an update has occurred. Depending on your use case, you could use different filtering. + +For the `Provider`, this template uses authenticated (HMAC) communication to the promotion endpoint, where a secret will need to be present on both the management cluster and leaf cluster(s). For simplicity, a `generic` provider could be used instead - which would not require the secret. + +
Expand to view example template + +```yaml +--- +apiVersion: templates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: pipeline-notification-resources + namespace: default + labels: + weave.works/template-type: application # These are generic Flux resources rather than Pipeline-specific +spec: + description: Creates flux notification controller resources for a cluster, required for promoting applications via pipelines. + params: + - name: RESOURCE_NAME + description: Name for the generated objects, should match the target Application (HelmRelease) name. + - name: RESOURCE_NAMESPACE + description: Namespace for the generated objects, should match the target Application (HelmRelease) namespace. + - name: PROMOTION_HOST + description: Host for the promotion webhook on the management cluster, i.e. "promotions.example.org" + - name: SECRET_REF + description: Name of the secret containing HMAC key in the token field + - name: ENV_NAME + description: Environment the cluster is a part of within a pipeline. + resourcetemplates: + - content: + - apiVersion: notification.toolkit.fluxcd.io/v1beta1 + kind: Provider + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + address: http://${PROMOTION_HOST}/promotion/${APP_NAME}/${ENV_NAME} + type: generic-hmac + secretRef: ${SECRET_REF} + - apiVersion: notification.toolkit.fluxcd.io/v1beta1 + kind: Alert + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + providerRef: + name: ${RESOURCE_NAME} + eventSeverity: info + eventSources: + - kind: HelmRelease + name: ${RESOURCE_NAME} + exclusionList: + - ".*upgrade.*has.*started" + - ".*is.*not.*ready" + - "^Dependencies.*" +``` + +
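+
+The `generic-hmac` provider expects the secret referenced by `SECRET_REF` to carry the shared key under a `token` field, and, as noted above, the same key must be known to the promotion webhook on the management cluster. A minimal sketch of creating it on a leaf cluster, with a hypothetical secret name and namespace:
+
+```bash
+# Generate a random shared key and store it where the Provider's secretRef points
+HMAC_KEY=$(openssl rand -hex 32)
+kubectl create secret generic promotion-hmac-key \
+  --namespace my-app-namespace \
+  --from-literal=token="$HMAC_KEY"
+```
+
+See the [Promoting Applications](../promoting-applications) page for how the key is consumed on the management cluster side.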
+ +## Summary +GitOpsTemplates provide a highly flexible way for platform and application teams to work together with Pipelines. + +You can hard-code values, offer a range of accepted values, or leave open to the consumer of the template to provide input based on your organization's requirements. + +Templates are subject to RBAC as with any Kubernetes resource, enabling you to easily control which tenants have access to which templates. + +For full details on GitOpsTemplates, be sure to read our +[documentation](../../gitops-templates/intro). + + + diff --git a/website/versioned_docs/version-0.24.0/pipelines/pipelines-with-jenkins.mdx b/website/versioned_docs/version-0.24.0/pipelines/pipelines-with-jenkins.mdx new file mode 100644 index 0000000000..07d97f4092 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/pipelines-with-jenkins.mdx @@ -0,0 +1,116 @@ +--- +title: Pipelines With Jenkins Webhook +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Setting Up Pipelines to Notify a Jenkins Webhook + +Using Flux's [Notification +Controller](https://fluxcd.io/flux/components/notification/), a Jenkins Webhook +can be invoked on Pipeline promotion events. + + +## Configuring Jenkins + +To enable external callers to trigger a build on a job, an additional ["Generic +Webhook Trigger" plugin](https://plugins.jenkins.io/generic-webhook-trigger/) is +required as Jenkins does not have this functionality built-in. + +After the plugin is installed a new "Generic Webhook Trigger" job configuration +option is available. + +The only mandatory field is the "Token". Without this token, Jenkins will not +know which build should be triggered. + +![an example token](img/pipelines-jenkins/token.png) + +### Post content parameters + +To access fields from the pipeline event payload, each field has to be defined +as a "Post content parameters". + +![extract reason from the post content](img/pipelines-jenkins/post-content-param.png) + +
Expand to see an example Promotion Event payload + +```json +{ + "involvedObject": { + "kind": "Pipeline", + "namespace": "flux-system", + "name": "podinfo-pipeline", + "uid": "74d9e3b6-0269-4c12-9051-3ce8cfb7886f", + "apiVersion": "pipelines.weave.works/v1alpha1", + "resourceVersion": "373617" + }, + "severity": "info", + "timestamp": "2023-02-08T12:34:13Z", + "message": "Promote pipeline flux-system/podinfo-pipeline to prod with version 6.1.5", + "reason": "Promote", + "reportingController": "pipeline-controller", + "reportingInstance": "chart-pipeline-controller-8549867565-7822g" +} +``` + +
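+With the Generic Webhook Trigger plugin, each post content parameter is typically extracted from this payload with a JSONPath expression. As an illustration (the variable names here are arbitrary), the fields used in this guide could be mapped as:
+
+- variable `reason`, JSONPath expression `$.reason`
+- variable `pipeline_name`, JSONPath expression `$.involvedObject.name`
+- variable `message`, JSONPath expression `$.message`
+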
+ +## Configure Notification Provider + +In order to be able to invoke a generic webhook, a notification provider has to +be defined. Jenkins expects the secret token which you configured above as a GET parameter or in the +request header. The secret token can be stored in a Secret: + +```yaml +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: jenkins-token + namespace: podinfo +stringData: + headers: | + token: epicsecret +``` + +Now we can define a Notification Provider using this secret: + +```yaml +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Provider +metadata: + name: jenkins-promotion + namespace: podinfo +spec: + type: generic + address: https://jenkins.domain.tld/generic-webhook-trigger/invoke + secretRef: + name: jenkins-token +``` + + +## Set Up Alerts + +We can configure an Alert to use the `jenkins-promotion` provider. For example +an Alert for the `podinfo-pipeline` in the `flux-system` namespace: + +```yaml +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Alert +metadata: + name: podinfo-pipeline-promotion + namespace: podinfo +spec: + eventSeverity: info + eventSources: + - kind: Pipeline + name: podinfo-pipeline + namespace: flux-system + providerRef: + name: jenkins-promotion +``` + diff --git a/website/versioned_docs/version-0.24.0/pipelines/pipelines-with-tekton.mdx b/website/versioned_docs/version-0.24.0/pipelines/pipelines-with-tekton.mdx new file mode 100644 index 0000000000..a4f3ac0f38 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/pipelines-with-tekton.mdx @@ -0,0 +1,312 @@ +--- +title: Pipelines With Tekton +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Setting Up Pipelines to Trigger a Tekton Pipeline + +Using Flux's [Notification +Controller](https://fluxcd.io/flux/components/notification/), a [Tekton EventListener](https://tekton.dev/docs/triggers/eventlisteners/) can be triggered on Pipeline promotion events. + +## Configuring Tekton Pipeline + +### Tekton Tasks + +In this tutorial, we have two tasks to demonstrate how to use parameter values from the +Pipeline event payload. Both tasks print out messages with information about the +pipeline promotion. Each task has three parameters: `name`, `namespace`, and +`message`. + +```yaml +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: hello + namespace: ww-pipeline +spec: + params: + - name: name + type: string + - name: namespace + type: string + - name: message + type: string + steps: + - name: echo + image: alpine + script: | + #!/bin/sh + echo "Hello $(params.namespace)/$(params.name)!" + echo "Message: $(params.message)" +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: goodbye + namespace: ww-pipeline +spec: + params: + - name: name + type: string + - name: namespace + type: string + - name: message + type: string + steps: + - name: goodbye + image: ubuntu + script: | + #!/bin/bash + echo "Goodbye $(params.namespace)/$(params.name)!" + echo "Message: $(params.message)" +``` + +### Tekton Pipeline + +The `hello-goodbye` Tekton Pipeline has the same three parameters as the tasks +and it passes down the values to them. 
+ +```yaml +--- +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: hello-goodbye + namespace: ww-pipeline +spec: + params: + - name: name + type: string + - name: namespace + type: string + - name: message + type: string + tasks: + - name: hello + taskRef: + name: hello + params: + - name: namespace + value: $(params.namespace) + - name: name + value: $(params.name) + - name: message + value: $(params.message) + - name: goodbye + runAfter: + - hello + taskRef: + name: goodbye + params: + - name: namespace + value: $(params.namespace) + - name: name + value: $(params.name) + - name: message + value: $(params.message) +``` + +## Configuring Tekton Pipline Automation + +In order to be able to trigger a Pipeline from an external source, we need three +Tekton resources. + +1. `TriggerBinding`: This resource binds the incoming JSON message to parameter + variables. +2. `TriggerTemplate`: This resource is the template of the `PipelineRun` that + will be started. +3. `EventListener`: This resource glues the above two resources together and + creates an http listener service. + +### Tekton TriggerBinding + +A JSON payload from the Notification Service about a Pipeline promotion looks +like this: + +```json +{ + "involvedObject": { + "kind": "Pipeline", + "namespace": "flux-system", + "name": "podinfo-pipeline", + "uid": "74d9e3b6-0269-4c12-9051-3ce8cfb7886f", + "apiVersion": "pipelines.weave.works/v1alpha1", + "resourceVersion": "373617" + }, + "severity": "info", + "timestamp": "2023-02-08T12:34:13Z", + "message": "Promote pipeline flux-system/podinfo-pipeline to prod with version 6.1.5", + "reason": "Promote", + "reportingController": "pipeline-controller", + "reportingInstance": "chart-pipeline-controller-8549867565-7822g" +} +``` + +In our tasks, we are using only the `involvedObject.name`, +`involvedObject.namespace` and `message` fields: + +```yaml +--- +apiVersion: triggers.tekton.dev/v1beta1 +kind: TriggerBinding +metadata: + name: ww-pipeline-binding + namespace: ww-pipeline +spec: + params: + - name: namespace + value: $(body.involvedObject.namespace) + - name: name + value: $(body.involvedObject.name) + - name: message + value: $(body.message) +``` + +### Tekton TriggerTemplate + +The template has the same parameters as the `Pipeline` resources: + +```yaml +--- +apiVersion: triggers.tekton.dev/v1beta1 +kind: TriggerTemplate +metadata: + name: ww-pipeline-template + namespace: ww-pipeline +spec: + params: + - name: namespace + default: "Unknown" + - name: name + default: "Unknown" + - name: message + default: "no message" + resourcetemplates: + - apiVersion: tekton.dev/v1beta1 + kind: PipelineRun + metadata: + generateName: hello-goodbye-run- + spec: + pipelineRef: + name: hello-goodbye + params: + - name: name + value: $(tt.params.name) + - name: namespace + value: $(tt.params.namespace) + - name: message + value: $(tt.params.message) +``` + +### Tekton EventListener + +To access all [required resources](https://tekton.dev/docs/getting-started/triggers/#create-an-eventlistener), we need an extra service account: + +```yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-ww-pipeline-robot + namespace: ww-pipeline +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: triggers-example-eventlistener-binding + namespace: ww-pipeline +subjects: +- kind: ServiceAccount + name: tekton-ww-pipeline-robot + namespace: ww-pipeline +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
tekton-triggers-eventlistener-roles +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: triggers-example-eventlistener-clusterbinding +subjects: +- kind: ServiceAccount + name: tekton-ww-pipeline-robot + namespace: ww-pipeline +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-triggers-eventlistener-clusterroles +``` + +With this `ServiceAccount`, we can create the `EventListener` using the +`TriggerBinding` and `TriggerTemplate`: + +```yaml +--- +apiVersion: triggers.tekton.dev/v1beta1 +kind: EventListener +metadata: + name: ww-pipeline-listener + namespace: ww-pipeline +spec: + serviceAccountName: tekton-ww-pipeline-robot + triggers: + - name: ww-pipeline-trigger + bindings: + - ref: ww-pipeline-binding + template: + ref: ww-pipeline-template +``` + +At this point, we should have a `Service` for our `EventListener`. + +```bash +❯ kubectl get service -n ww-pipeline +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +el-ww-pipeline-listener ClusterIP 10.96.250.23 8080/TCP,9000/TCP 3d +``` + +## Configure Notification Provider + +In this case, we are using Tekton in the same cluster, so we can use an internal +address to access the `EventListener` service. If they are not in the same +cluster, exposing the service may be required through an ingress or a service mesh. + +```yaml +--- +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Provider +metadata: + name: tekton-promotion + namespace: hello-podinfo +spec: + type: generic + address: http://el-ww-pipeline-listener.ww-pipeline:8080/ +``` + +## Set Up Alerts + +We can configure an Alert to use the `tekton-promotion` provider. For example, +an Alert for the `podinfo-pipeline` in the `flux-system` namespace: +```yaml +--- +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Alert +metadata: + name: tekton-promotion-podinfo + namespace: hello-podinfo +spec: + eventSeverity: info + eventSources: + - kind: Pipeline + name: hello-podinfo + namespace: flux-system + providerRef: + name: tekton-promotion +``` diff --git a/website/versioned_docs/version-0.24.0/pipelines/promoting-applications.mdx b/website/versioned_docs/version-0.24.0/pipelines/promoting-applications.mdx new file mode 100644 index 0000000000..4db86b51aa --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/promoting-applications.mdx @@ -0,0 +1,484 @@ +--- +title: Promoting applications +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import AlphaWarning from "../_components/_alpha_warning.mdx"; +import ManualPromotionUI from "./img/manual-promotion-ui.png"; + + +# Promoting applications through pipeline environments + + + + +Pipelines allow you to configure automatic promotions of applications through a consecutive set of environments, e.g. from dev to staging to production. The environments are defined in the `Pipeline` resource itself so that each pipeline governs a single application and all the environments to which it is deployed. + +:::info +At the moment only applications defined as Flux `HelmReleases` are supported in automatic promotions. +::: + +
+ +![an example promotion PR](img/promotion-pr.png) + +
An example of a pull request for an application promotion
+

+
+The [Getting started guide](../getting-started) describes how to create a basic pipeline for an application so you can visualize its deployments across a series of environments. You may also configure a pipeline in order to promote applications across those environments.
+There are currently two supported strategies for application promotions:
+- Pull request strategy: this strategy is used for applications that are delivered via Flux to all environments of a pipeline. Typically, the versions of these applications are stored in Git, so pull requests can be used to update them as part of a promotion.
+- Notification strategy: this strategy is used when an external CI system is responsible for promoting an application across the environments of a pipeline. In this strategy, the notification controller running on the management cluster is used to forward notifications of successful promotions to external CI systems.
+
+Before configuring any of the above promotion strategies, you need to set up notifications from all your environments so that whenever a new version gets deployed, the promotion webhook component of the pipeline controller is notified and takes an action based on the pipeline definition. The rest of this guide describes the configuration needed to set up application promotion via pipelines.
+
+## Expose the promotion webhook
+
+Applications deployed in leaf clusters use the Flux notification controller running on each leaf cluster to notify the management cluster of a successful promotion. This requires network connectivity to be established between the leaf cluster and the management cluster.
+
+The component responsible for listening to incoming notifications from leaf clusters is the pipeline controller. It hosts a webhook service that needs to be exposed via an ingress resource to make it available for external calls. Exposing the webhook service is done via the Weave GitOps Enterprise Helm chart values, and the configuration used depends on your environment. The example below shows the configuration for the NGINX ingress controller and needs to be adjusted if another ingress controller is used:
+
+```yaml
+spec:
+  values:
+    enablePipelines: true
+    pipeline-controller:
+      promotion:
+        ingress:
+          enabled: true
+          className: nginx
+          annotations:
+            cert-manager.io/cluster-issuer: letsencrypt
+          hosts:
+            - host: promotions.example.org
+              paths:
+                - path: /?(.*)
+                  pathType: ImplementationSpecific
+          tls:
+            - secretName: promotions-tls
+              hosts:
+                - promotions.example.org
+```
+
+You will need the externally reachable URL of this service later on in this guide.
+
+## Setup notifications from leaf clusters
+
+Once the webhook service is exposed over HTTP/S, you need to create alert/provider resources to send notifications to it from leaf clusters. These notifications represent successful promotions for applications running on the leaf clusters.
+
+Successful promotion events are triggered by Flux's [notification controller](https://fluxcd.io/flux/components/notification/). You create a Provider pointing to the promotion webhook exposed earlier and an Alert targeting the app's HelmRelease:
+
+```yaml
+---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Provider
+metadata:
+  name: promotion-my-app
+spec:
+  address: "https://promotions.example.org/promotion/pipeline-01/my-app/dev"
+  type: generic-hmac
+  secretRef:
+    name: hmac-secret
+```
+
+In the example above, the `generic-hmac` Provider is used to ensure notifications originate from authenticated sources.
The referenced Secret should include a `token` field which holds the HMAC key. The same HMAC key must be specified in the Secret referenced by the `.spec.promotion.strategy.secretRef.name` field, so that the pipeline controller can verify any incoming notifications. For more information on the `generic-hmac` Provider, please refer to the notification controller [docs](https://fluxcd.io/flux/components/notification/provider/#generic-webhook-with-hmac).
+
+Note that by default, the promotion webhook endpoint is exposed at `/promotion` as shown in the example above. However, you may use rewrite rules in your ingress configuration to omit it, if desired. For example, if using the NGINX ingress controller, you may use the following annotation:
+```yaml
+annotations:
+  nginx.ingress.kubernetes.io/rewrite-target: /promotion/$1
+```
+The Provider address can then be set as `https://promotions.example.org/pipeline-01/my-app/dev`.
+
+:::tip
+You may also use the [generic webhook provider type that supports HMAC verification](https://fluxcd.io/flux/components/notification/provider/#generic-webhook-with-hmac) to ensure incoming notifications originate from authenticated sources.
+:::
+
+The `address` field's URL path comprises three components:
+
+1. The namespace of the app's pipeline.
+1. The name of the pipeline resource.
+1. The origin environment's name. This is the name of the environment that the event is created in, e.g. "dev" for events coming from the "dev" environment.
+
+Weave GitOps Enterprise can then parse the incoming URL path to identify the pipeline resource and look up the next environment for the defined promotion action.
+
+An example Alert might look like this:
+
+```yaml
+---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Alert
+metadata:
+  name: promotion-my-app
+spec:
+  eventSeverity: info
+  eventSources:
+    - kind: HelmRelease
+      name: my-app
+  exclusionList:
+    - .*upgrade.*has.*started
+    - .*is.*not.*ready
+    - ^Dependencies.*
+  providerRef:
+    name: promotion-my-app
+```
+
+:::tip
+Be sure to create the Provider/Alert tuple on **each of the leaf clusters
+targeted by a pipeline**.
+:::
+
+Now as soon as the `HelmRelease` on the first environment defined in the pipeline is bumped (e.g. by Flux discovering a new version in the Helm repository), an event is sent to the promotion webhook, which will determine the next action based on the pipeline definition and chosen strategy. The rest of this guide describes how to set up any of the available strategies depending on your requirements.
+
+## Pull request
+
+This section covers adding a promotion by pull request (PR) strategy, so that whenever the application defined in a pipeline
+is upgraded in one of the pipeline's environments, a PR is created that updates the manifest file setting the application version in the next environment.
+
+The dynamic nature of GitOps deployments requires you to assist Weave GitOps a little with information on which repository hosts the manifest files,
+how to authenticate with the repository and the Git provider API, and which file hosts the version definition for each environment.
+
+:::caution
+
+Creating pull requests requires read and write access to your Git repository.
+A compromised token could lead to changes being pushed to the repository and the creation of legitimate-looking changes/pull requests.
+
+Ensure you understand and adopt [security recommendations](./#security-recommendations) before using the feature.

+
+:::
+
+### Supported Git Providers
+The following Git providers are currently supported by this promotion strategy:
+
+- [GitHub](https://github.com/)
+- [GitLab](https://gitlab.com/)
+- [BitBucket Server / DataCenter](https://www.atlassian.com/software/bitbucket/enterprise)
+
+Select your Git provider via `.spec.promotion.strategy.pull-request.type`. For example, for `gitlab` it would look similar to:
+
+```yaml
+promotion:
+  strategy:
+    pull-request:
+      type: gitlab
+      url: "https://gitlab.com/weaveworks/"
+      baseBranch: main
+      secretRef:
+        name: gitlab-promotion-credentials
+```
+
+More info in the [spec](../spec/v1alpha1/pipeline/#pipeline).
+
+### Credentials Secret
+
+Creating a pull request involves several credentials:
+
+1. The pipeline controller receives events via [webhook from leaf clusters](./#setup-notifications-from-leaf-clusters). HMAC is used for authN/authZ, so an HMAC key must be provided in this case.
+2. The pipeline controller clones and patches the manifests to promote from the pipeline configuration repo. A set of [git credentials](https://fluxcd.io/flux/components/source/gitrepositories/#secret-reference) is required.
+3. The pipeline controller uses the Git provider API to create the pull request with the promoted manifests. A Personal Access Token (PAT) needs to be created to interact with the pipeline's Git provider API. This PAT is also used to list pull requests from the configured repository.
+
+Create a Kubernetes Secret containing the above data.
+
Expand to see example + +```shell +# example to use git over https with basic auth and pat +$ kubectl create secret generic promotion-credentials \ + --namespace=pipeline-01 \ + --from-literal="username=" \ + --from-literal="password=" \ + --from-literal="token=" \ + --from-literal="hmac-key=" +``` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: promotion-credentials + namespace: pipeline-01 +data: + username: ZXhhbXBsZQ== + password: ZXhhbXBsZS1wYXNzd29yZA== + token: Z2hwX01IL3RsTFpXTXZMY0FxVWRYY1ZGL0lGbzh0WDdHNjdsZmRxWQ== + hmac-key: OEIzMTNBNjQ0REU0OEVGODgxMTJCQ0VFNTQ3NkE= +type: Opaque +``` + +:::tip +- The Git provider token provided in the `token` field needs to be given permission to create pull requests in the pipeline's repository (defined in `.spec.promotion.strategy.pull-request.url`). +- The `hmac-key` field must match the key used for the Provider resource (.spec.secretRef), if specified in the leaf clusters. +::: + +
+ +### Define promotion in pipeline resource + +The field `.spec.promotion.strategy.pull-request` defines details about the Git repository used for promoting the given app. +Set the `secretRef.name` field to the name of the Secret created in the previous step and the `url` and `branch` fields to the +Git repository's HTTPS URL and optionally a specific branch (if the branch is not set, it defaults to `main`). +If using the `generic-hmac` Provider from leaf clusters, also set the `.spec.promotion.strategy.secretRef.name` to the name of the Secret created previously. + +More info in the [spec](../spec/v1alpha1/pipeline/#pipeline) + +### Security Recommendations + +:::tip + +Adopt as much of the recommendations in this section to reduce the risks associated with the secrets involved in pull requests. + +::: + +1. **Create a user account for pull request changes**: this user context would be used to do any git provider operation, +and for security and auditing perspective, you don't want to impersonate a real user for it. + +
Expand to see example + +![using bot account](img/bot-account.png) +
+ +2. **Restrict access to the secret**: the promotion credentials needs to reside in the same Namespace as the Pipeline resource on the management cluster. Restrict +via RBAC that only `pipeline-controller` service account is able to read it. + +
Expand to see example + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: read-app-promotion-credentials + namespace: app +rules: + - apiGroups: + - "" + resourceNames: + - "app-promotion-credentials" + resources: + - "secrets" + verbs: + - "get" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipeline-controller-read-app-promotion-credentials + namespace: app +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: read-app-promotion-credentials +subjects: + - kind: ServiceAccount + name: pipeline-controller + namespace: pipeline-system +``` +
+ +3. **Do not use long-live tokens**: set an expiration date and rotate them according to your security policy. + +
Expand to see example + +![create token with expiration](img/create-token-with-expiration.png) +
+ +4. **Honour the least privilege principle**: avoid having high privilege tokens. Restrict the token to your just your repo and to just the operations required. + +
Expand to see example + +For example, if the case of GitHub, use [fine-grained tokens](https://github.blog/2022-10-18-introducing-fine-grained-personal-access-tokens-for-github/) to only +allow access to the single repo that your configuration manifests exist. + +![create token least priviledge](img/fine-grained-token.png) +
+ +5. **Review active access tokens on a regular basis**: to ensure that only the ones that are required are present at all times. + +
Expand to see example + +For example, using github and fine-grained tokens you [could do so](https://github.blog/2022-10-18-introducing-fine-grained-personal-access-tokens-for-github/#approving-and-auditing-personal-access-tokens). + +![review tokens](img/manage-fine-grained.png) +
+ +6. **Review git provider recommendations and examples** +- [GitHub](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) +- [GitLab](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) + + +### Add markers to app manifests + +The discovery of the version field is done using deterministic markers in a YAML manifest file. An example `HelmRelease` manifest with such a marker looks like this: + +```yaml {7} +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +spec: + chart: + spec: + version: 0.13.7 # {"$promotion": "pipeline-01:my-app:prod"} +``` + +The value of the `$promotion` field in the comment is comprised of 3 components separated by colons: + +1. The first field is the Namespace of the pipeline resource that the app is part of. In the example above this is `pipeline-01`. +1. The second field denotes the name of the pipeline resource. +1. The third field is the name of the environment that this specific HelmRelease targets. The environment name in the marker needs to match with the `name` field of one of the environments defined in the pipeline's `.spec.environments` array. + +Weave GitOps Enterprise will look for this marker whenever it receives an event from the respective HelmRelease of one of the leaf clusters and patch the file with the version denoted in the event (see the section above for instructions on setting up notification events from leaf clusters). Finally, it will create a Git provider PR to update the version of the application for the next environment in the pipeline. + +## Notification + +This section explains how to configure pipelines to work with external CI systems that are responsible for application promotions. + +This strategy uses the notification controller running on the management cluster, to forward any notifications received by the promotion webhook, from leaf clusters to external CI systems. This requires to [patch](https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-notifications-for-third-party-controllers) the Flux manifests of the management cluster, in order to allow objects of type `Pipeline` to be used as event sources. An example of a patch applied to enable this is shown below: + +```yaml +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- gotk-components.yaml +- gotk-sync.yaml +patches: +- patch: | + - op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/- + value: Pipeline + target: + kind: CustomResourceDefinition + name: alerts.notification.toolkit.fluxcd.io +``` + +You can now create Provider/Alert resources on the management cluster to forward notifications to external systems. For example, the Provider resource shown below is used to invoke a GitHub Actions workflow on a repository: +```yaml +--- +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Provider +metadata: + name: promotion-my-app-via-github-actions +spec: + type: githubdispatch + address: https://github.com/my-org/my-app-repo + secretRef: + name: github-credentials +``` + +To use this Provider, add an Alert that uses the pipeline resource defined on the management cluster as an event source. 
An example of such an Alert is shown below: + +```yaml +--- +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Alert +metadata: + name: promotion-my-app-via-github-actions +spec: + eventSeverity: info + eventSources: + - kind: Pipeline + name: my-app + namespace: my-app-ns + providerRef: + name: promotion-my-app-via-github-actions +``` + +The notification controller running on the management cluster is now configured to forward any promotion notifications received from leaf clusters. To actually use this strategy from a pipeline, set the promotion field as shown below: + +```yaml {8-9} +--- +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: my-app + namespace: my-app-ns +spec: + promotion: + notification: {} +``` + +Promotion notifications from leaf clusters should now be forwarded via the notification controller running on the management cluster and should include information about the version of the application being promoted. + +## Manual promotion + +The supported strategies mentioned above, do not require any user interaction when handling promotions. However, there is often a need for a human operator to manually approve a promotion to the next environment. To achieve that, set the `spec.promotion.manual` key to `true`. + +
Expand to see example + +```yaml {8} +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: my-app + namespace: my-app-ns +spec: + promotion: + manual: true + strategy: + pull-request: + type: github + url: https://github.com/my-org/my-app-repo + baseBranch: main + secretRef: + name: promotion-credentials +``` + +
+ +When this key is set and a promotion is detected, Weave GitOps will prompt the user to manually promote the application to the next environment, via the use of a button shown under the next environment. + +
+ + + +
Manual promotion of an application
+
+ +## Configuration + +### Retry Logic + +By default if a promotion fails, an exponential back-off retry happens and +returns with an error only after three retries. + +Through Helm values, the retry logic is configurable. + +```yaml +# values.yaml +promotion: + retry: + # Initial delay between retries. + delay: 2 + # Maximum delay between retries. + maxDelay: 20 + # Number of attempts. + threshold: 3 +``` + +The promotion happens through an HTTP endpoint call, that endpoint may has +connection timeout limits, that's why the `maxDelay` option is there. If the +calculated delay would exceed this value, it will use that as delay. For example +if the delay values would be `[2, 4, 8, 16, 32, 64]`, but `maxDelay` is set to +15, the list will be `[2, 4, 8, 15, 15, 15]`. With this option, the promotion +will be retried on failure, but the sum of delay values will be only 59 seconds +instead of 126 seconds. + +### Rate Limiting + +The promotion endpoint can be exposed to the internet (for example github +actions), to mitigate DoS attacks, the endpoint has rate limits. By default it's +20 requests per 30 seconds. + +Rate limiting can be configured through Helm values: + +```yaml +# values.yaml +promotion: + rateLimit: + # Number of requests allowed in set interval. + value: 20 + interval: 30 +``` diff --git a/website/versioned_docs/version-0.24.0/pipelines/spec/index.mdx b/website/versioned_docs/version-0.24.0/pipelines/spec/index.mdx new file mode 100644 index 0000000000..1cb0a00637 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/spec/index.mdx @@ -0,0 +1,8 @@ +--- +title: Pipeline versions +hide_title: true +--- + +## Versions + +- [v1alpha1](./v1alpha1/pipeline.mdx) diff --git a/website/versioned_docs/version-0.24.0/pipelines/spec/v1alpha1/pipeline.mdx b/website/versioned_docs/version-0.24.0/pipelines/spec/v1alpha1/pipeline.mdx new file mode 100644 index 0000000000..b9c5475e1d --- /dev/null +++ b/website/versioned_docs/version-0.24.0/pipelines/spec/v1alpha1/pipeline.mdx @@ -0,0 +1,238 @@ +--- +title: Pipeline +hide_title: true +--- +import TierLabel from "../../../_components/TierLabel"; + +# Pipeline + +The Pipeline API defines a resource for continuous delivery pipelines. + +An example of a fully defined pipeline that creates pull requests for application promotions is shown below. + +```yaml +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: podinfo-02 + namespace: flux-system +spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: podinfo + environments: + - name: dev + targets: + - namespace: podinfo-02-dev + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: test + targets: + - namespace: podinfo-02-qa + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - namespace: podinfo-02-perf + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: prod + targets: + - namespace: podinfo-02-prod + clusterRef: + kind: GitopsCluster + name: prod + namespace: flux-system + promotion: + strategy: + pull-request: + type: github + url: https://github.com/my-org/my-app-repo + baseBranch: main + secretRef: + name: github-credentials +``` + +## Specification + +The documentation for version `v1alpha1` of a `Pipeline` resource is found next. 
+ +### Pipeline + + +```go +// Pipeline is the Schema for the pipelines API +type Pipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PipelineSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status PipelineStatus `json:"status,omitempty"` +} + +type PipelineSpec struct { + // Environments is a list of environments to which the pipeline's application is supposed to be deployed. + // +required + Environments []Environment `json:"environments"` + // AppRef denotes the name and type of the application that's governed by the pipeline. + // +required + AppRef LocalAppReference `json:"appRef"` + // Promotion defines details about how promotions are carried out between the environments + // of this pipeline. + // +optional + Promotion *Promotion `json:"promotion,omitempty"` +} + +type Environment struct { + // Name defines the name of this environment. This is commonly something such as "dev" or "prod". + // +required + Name string `json:"name"` + // Targets is a list of targets that are part of this environment. Each environment should have + // at least one target. + // +required + Targets []Target `json:"targets"` + // Promotion defines details about how the promotion is done on this environment. + // +optional + Promotion *Promotion `json:"promotion,omitempty"` +} + +type Target struct { + // Namespace denotes the namespace of this target on the referenced cluster. This is where + // the app pointed to by the environment's `appRef` is searched. + // +required + Namespace string `json:"namespace"` + // ClusterRef points to the cluster that's targeted by this target. If this field is not set, then the target is assumed + // to point to a Namespace on the cluster that the Pipeline resources resides on (i.e. a local target). + // +optional + ClusterRef *CrossNamespaceClusterReference `json:"clusterRef,omitempty"` +} + +// Promotion define promotion configuration for the pipeline. +type Promotion struct { + // Manual option to allow promotion between to require manual approval before proceeding. + // +optional + Manual bool `json:"manual,omitempty"` + // Strategy defines which strategy the promotion should use. + Strategy Strategy `json:"strategy"` +} + +// Strategy defines all the available promotion strategies. All of the fields in here are mutually exclusive, i.e. you can only select one +// promotion strategy per Pipeline. Failure to do so will result in undefined behaviour. +type Strategy struct { + // PullRequest defines a promotion through a Pull Request. + // +optional + PullRequest *PullRequestPromotion `json:"pull-request,omitempty"` + // Notification defines a promotion where an event is emitted through Flux's notification-controller each time an app is to be promoted. + // +optional + Notification *NotificationPromotion `json:"notification,omitempty"` + // SecrefRef reference the secret that contains a 'hmac-key' field with HMAC key used to authenticate webhook calls. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` +} +type GitProviderType string + +const ( + Github GitProviderType = "github" + Gitlab GitProviderType = "gitlab" + BitBucketServer GitProviderType = "bitbucket-server" +) + +type PullRequestPromotion struct { + // Indicates the git provider type to manage pull requests. 
+ // +required + // +kubebuilder:validation:Enum=github;gitlab;bitbucket-server + Type GitProviderType `json:"type"` + // The git repository HTTPS URL used to patch the manifests for promotion. + // +required + URL string `json:"url"` + // The branch to checkout after cloning. Note: This is just the base + // branch that will eventually receive the PR changes upon merge and does + // not denote the branch used to create a PR from. The latter is generated + // automatically and cannot be provided. + // +required + BaseBranch string `json:"baseBranch"` + // SecretRef specifies the Secret containing authentication credentials for + // the git repository and for the Git provider API. + // For HTTPS repositories the Secret must contain 'username' and 'password' + // fields. + // For Git Provider API to manage pull requests, it must contain a 'token' field. + // +required + SecretRef meta.LocalObjectReference `json:"secretRef"` +} + +type NotificationPromotion struct{} + +``` + +### References + +```go +// LocalAppReference is used together with a Target to find a single instance of an application on a certain cluster. +type LocalAppReference struct { + // API version of the referent. + // +required + APIVersion string `json:"apiVersion"` + + // Kind of the referent. + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` +} + +// CrossNamespaceClusterReference contains enough information to let you locate the +// typed Kubernetes resource object at cluster level. +type CrossNamespaceClusterReference struct { + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent. + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` + + // Namespace of the referent, defaults to the namespace of the Kubernetes resource object that contains the reference. + // +optional + Namespace string `json:"namespace,omitempty"` +} +``` + +### Status + +```go +type PipelineStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Pipeline. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} +``` + +#### Condition Reasons +```go +// Reasons are provided as utility, and are not part of the declarative API. +const ( + // TargetClusterNotFoundReason signals a failure to locate a cluster resource on the management cluster. + TargetClusterNotFoundReason string = "TargetClusterNotFound" + // TargetClusterNotReadyReason signals that a cluster pointed to by a Pipeline is not ready. + TargetClusterNotReadyReason string = "TargetClusterNotReady" + // ReconciliationSucceededReason signals that a Pipeline has been successfully reconciled. + ReconciliationSucceededReason string = "ReconciliationSucceeded" +) +``` + diff --git a/website/versioned_docs/version-0.24.0/policy/authorization.mdx b/website/versioned_docs/version-0.24.0/policy/authorization.mdx new file mode 100644 index 0000000000..b195e33957 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/policy/authorization.mdx @@ -0,0 +1,48 @@ +--- +title: Authorization +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Authorization + +This section provides a recommended way to configure RBAC in the context of policies. It is oriented to the journey +that you expect your users to have. 

+
+## View Resources
+
+The policy journey in the UI involves several resources. We have the [Policies](./policy.mdx) that are used by the agent, the resulting [Violations](./getting-started.mdx) when the agent enforces those policies, and the [PolicyConfigs](./policy-configuration.mdx) that the user can configure to override policy parameters.
+The violations are essentially Kubernetes events that contain the [Validation](./policy.mdx#policy-validation) object.
+
+In order to view those resources, users need read access to the `policies`, `policyconfigs`, and `events` resources.
+
+An example configuration to achieve this is shown below: a `policies-reader` cluster role and a `developer-policies-reader`
+cluster role binding that allow the `developer` group to access all the policy-related resources.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: policies-reader
+rules:
+  - apiGroups: ["pac.weave.works"]
+    resources: ["policies", "policyconfigs"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["get", "watch", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: developer-policies-reader
+subjects:
+  - kind: Group
+    name: developer
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: policies-reader
+  apiGroup: rbac.authorization.k8s.io
+```
diff --git a/website/versioned_docs/version-0.24.0/policy/commit-time-checks.mdx b/website/versioned_docs/version-0.24.0/policy/commit-time-checks.mdx
new file mode 100644
index 0000000000..207920ea08
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/policy/commit-time-checks.mdx
@@ -0,0 +1,212 @@
+---
+title: Commit/Build Time Checks
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# Commit/Build Time Checks
+
+## Overview
+Weave GitOps Enterprise enables developers and operators to check policy violations early in their software development life cycle, specifically at commit and build time. Developers and operators can have the Weave Policy Validator integrated into their CI tools to validate whether their code changes violate any policies.
+
+Weave GitOps Enterprise offers a policy engine image that can be used to perform commit/build time checks. The image can be found on Docker Hub under the name: `weaveworks/weave-iac-validator:v1.1`.
+
Expand to view the usage options
+
+  ```bash
+  USAGE:
+     app [global options] command [command options] [arguments...]
+
+  VERSION:
+     0.0.1
+
+  COMMANDS:
+     help, h  Shows a list of commands or help for one command
+
+  GLOBAL OPTIONS:
+     --path value                       path to scan resources from
+     --helm-values-file value           path to resources helm values file
+     --policies-path value              path to policies kustomization directory
+     --policies-helm-values-file value  path to policies helm values file
+     --git-repo-provider value          git repository provider
+     --git-repo-host value              git repository host
+     --git-repo-url value               git repository url
+     --git-repo-branch value            git repository branch
+     --git-repo-sha value               git repository commit sha
+     --git-repo-token value             git repository token
+     --azure-project value              azure project name
+     --sast value                       save result as gitlab sast format
+     --sarif value                      save result as sarif format
+     --json value                       save result as json format
+     --generate-git-report              generate git report if supported (default: false)
+     --remediate                        auto remediate resources if possible (default: false)
+     --no-exit-error                    exit with no error (default: false)
+     --help, -h                         show help (default: false)
+     --version, -v                      print the version (default: false)
+  ```
+
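+As a quick check outside of CI, the same image can be run locally against a checked-out repository. A minimal sketch, assuming the image exposes the `weave-validator` binary on its PATH (as the CI examples below suggest) and that your manifests sit in the repository root with policies under `./policies` (both paths are illustrative):
+
+```bash
+# Mount the current working copy and validate it against the local policy library
+docker run --rm -v "$(pwd)":/workspace -w /workspace \
+  weaveworks/weave-iac-validator:v1.1 \
+  weave-validator --path /workspace --policies-path /workspace/policies
+```
+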
+ +--- +## Setup policies +Policies can be a helm chart, kustomize directory or just plain kubernetes yaml files. + +Example of policies kustomize directory +```bash +└── policies + ├── kustomization.yaml + ├── minimum-replica-count.yaml + ├── privileged-mode.yaml + └── privilege-escalation.yaml +``` + +```yaml +# kustomization.yaml +kind: Kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +resources: +- minimum-replica-count.yaml +- privilege-escalation.yaml +- privileged-mode.yaml +``` +--- +## Supported CI/CD +- [x] [Github](#github) +- [x] [Github Enterprise](#github) +- [x] [Gitlab](#gitlab) +- [x] [Bitbucket](#bitbucket) +- [x] [Circle CI](#circle-ci) +- [x] [Azure Devops](#azure-devops) +--- +## Auto-Remediation +Weave validator supports auto-remediation functionality which creates a pull request with suggested fixes to remediate the reported violations. + +Supported in: +- [ ] Helm +- [x] Kustomize +- [x] Plain kubernetes files + +To enable it you need to provide ```--remediate``` flag and ```--git-repo-token```. + +> The token must have the permission to create a pull request. + +--- +## UseCase: Github +See how to setup the [Github Action](https://github.com/weaveworks/weave-action) + +--- +## UseCase: Gitlab + +```yaml +weave: + image: + name: weaveworks/weave-iac-validator:v1.1 + script: + - weave-validator --path --policies-path +``` + +#### Enable Auto Remediation + +```yaml + script: + - weave-validator --path --policies-path --git-repo-token $GITLAB_TOKEN --remediate +``` +--- +#### Enable Static Application Security Testing + +```yaml +stages: + - weave + - sast + +weave: + stage: weave + image: + name: weaveworks/weave-iac-validator:v1.1 + script: + - weave-validator --policies-path --sast sast.json + artifacts: + when: on_failure + paths: + - sast.json + +upload_sast: + stage: sast + when: always + script: + - echo "creating sast report" + artifacts: + reports: + sast: sast.json +``` +--- +## UseCase: Bitbucket + +```yaml +pipelines: + default: + - step: + name: 'Weaveworks' + image: weaveworks/weave-iac-validator:v1.1 + script: + - weave-validator --path --policies-path +``` +#### Enable Auto Remediation + +```yaml + script: + - weave-validator --path --policies-path --git-repo-token $TOKEN --remediate +``` + +#### Create Pipeline Report + +```yaml + script: + - weave-validator --path --policies-path --git-repo-token $TOKEN -generate-git-report +``` + +--- +## UseCase: CircleCI + +```yaml +jobs: + weave: + docker: + - image: weaveworks/weave-iac-validator:v1.1 + steps: + - checkout + - run: + command: weave-validator --path --policies-path +``` + +#### Enable Auto Remediation + +```yaml + - run: + command: weave-validator --path --policies-path --git-repo-token ${GITHUB_TOKEN} --remediate +``` + +--- +## UseCase: Azure DevOps + +```yaml +trigger: +- + +pool: + vmImage: ubuntu-latest + +container: + image: weaveworks/weave-iac-validator:v1.1-azure + +steps: +- script: weave-validator --path --policies-path --git-repo-token $(TOKEN) +``` + +#### Enable Auto Remediation + +```yaml +steps: +- script: weave-validator --path --policies-path --git-repo-token $(TOKEN) --remediate +``` diff --git a/website/versioned_docs/version-0.24.0/policy/getting-started.mdx b/website/versioned_docs/version-0.24.0/policy/getting-started.mdx new file mode 100644 index 0000000000..8e7fcb5402 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/policy/getting-started.mdx @@ -0,0 +1,119 @@ +--- +title: Getting Started +hide_title: true +--- + +import TierLabel from 
"../_components/TierLabel"; +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Getting Started +Enabling the Weave Policy Engine features in Weave GitOps is done by running the policy agent on the cluster. This section gives an overview of the policy ecosystem and the steps required for installing and running the policy agent on leaf clusters. + +## The Policy Ecosystem + +The policy ecosystem consists of several moving parts. The two primary components are the [Policy Agent](./weave-policy-profile.mdx#policy-agent-configuration) and the [Policy CRs](./policy.mdx). The agent runs in several [modes](./weave-policy-profile.mdx#agent-modes), and uses the Policy CRs to perform validations on different resources. The results of those validations can be written to different [sinks](./weave-policy-profile.mdx#policy-validation-sinks). + +There are two other optional components: the [PolicySet](./policy-set.mdx), and the [PolicyConfig](./policy-configuration.mdx). The PolicySet can be used to filter policies for a specific mode, while the PolicyConfig can be used to override policy parameters during the validation of a certain resource. + +![Policy Ecosystem](./img/policy-ecosystem.png) + +## Installation Pre-requisites + +### Weave GitOps +You need to have a running instance of Weave GitOps with at least one CAPI provider installed to provision Kubernetes clusters. See [Weave GitOps Installation](https://docs.gitops.weave.works/docs/installation/) page for more details about installing Weave GitOps. + +### Policy Library +For the policy agent to work, it will need a source for the [policies](./policy.mdx) that it will enforce in the cluster. Enterprise customers should request access to fork our policy library into their local repositories. Our policy library includes an extensive list of policy CRs that cover a multitude of security and compliance benchmarks. + +## Install the Policy Agent + +To install the policy agent on a leaf cluster, you should select the `weave-policy-agent` from the profiles dropdown in the `Create Cluster` page. + +![Policy Profile](./img/weave-policy-profile.png) + +You should then configure the `values.yaml` to pull the policies from your repo into the cluster. This is done by configuring the `policySource` section. If your policy library repo is private, you will also need to reference the `Secret` that contains the repo credentials. This is usually the [secret](../cluster-management/getting-started.mdx#add-a-cluster-bootstrap-config) you created while bootstrapping flux on the management cluster and is copied to your leaf cluster during creation. + +
Expand to see an example that creates a new git source + +```yaml +policySource: + enabled: true + url: ssh://git@github.com/weaveworks/policy-library # This should be the url of the forked repo + tag: v1.0.0 + path: ./ # Could be a path to the policies dir or a kustomization.yaml file + secretRef: my-pat # the name of the secret containing the repo credentials +``` +
+ +
Expand to see an example that uses an existing git source + +```yaml +policySource: + enabled: true + sourceRef: # Specify the name for an existing GitSource reference + kind: GitRepository + name: policy-library + namespace: flux-system +``` +
+ +You can find more about other policy profile configurations [here](../weave-policy-profile/). + +## Policies in UI +After the leaf cluster is provisioned and the profile is installed, you should now see the policies listed in the Policies tab in Weave GitOps UI. + +![Policies](./img/weave-policies.png) + +Now you have a provisioned cluster with these policies enforced by the policy agent. + +> By default, the policy profile is set up to enforce policies at deployment time using admission controller, which results in blocking any deployment that violates the enforced policies. + +## Prevent Violating Changes +Now let's try to deploy a Kubernetes deployment that violates the `Container Image Pull Policy` which is one of the enforced policies. +This policy is violated when the container's `imagePullPolicy` is not set to `Always`. + +
Expand for an example of a violating deployment + + ```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + ``` + +
+ + +Once you apply it, the policy agent will deny this request and show a violation message, and accordingly the deployment will not be created. + +## Violations Logs in UI +You can go to the `Violations Log` in Weave GitOps UI to view the policy violations of all the connected clusters, and dive into the details of each violation. + +This view shows only the violations resulting from the [admission](./weave-policy-profile.mdx#admission) mode by configuring the [events sink](weave-policy-profile.mdx#policy-validation-sinks). + +Violations Log + +![Violations Logs](./img/violations-logs.png) + +Violations Log Details + +![Violation Log Details](./img/violations-log-detail.png) diff --git a/website/versioned_docs/version-0.24.0/policy/img/policy-ecosystem.png b/website/versioned_docs/version-0.24.0/policy/img/policy-ecosystem.png new file mode 100644 index 0000000000..1f0c8c50ec Binary files /dev/null and b/website/versioned_docs/version-0.24.0/policy/img/policy-ecosystem.png differ diff --git a/website/versioned_docs/version-0.24.0/policy/img/violations-log-detail.png b/website/versioned_docs/version-0.24.0/policy/img/violations-log-detail.png new file mode 100644 index 0000000000..a180387bf7 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/policy/img/violations-log-detail.png differ diff --git a/website/versioned_docs/version-0.24.0/policy/img/violations-logs.png b/website/versioned_docs/version-0.24.0/policy/img/violations-logs.png new file mode 100644 index 0000000000..58773740d9 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/policy/img/violations-logs.png differ diff --git a/website/versioned_docs/version-0.24.0/policy/img/weave-policies.png b/website/versioned_docs/version-0.24.0/policy/img/weave-policies.png new file mode 100644 index 0000000000..bff055674d Binary files /dev/null and b/website/versioned_docs/version-0.24.0/policy/img/weave-policies.png differ diff --git a/website/versioned_docs/version-0.24.0/policy/img/weave-policy-profile.png b/website/versioned_docs/version-0.24.0/policy/img/weave-policy-profile.png new file mode 100644 index 0000000000..72f9150960 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/policy/img/weave-policy-profile.png differ diff --git a/website/versioned_docs/version-0.24.0/policy/intro.mdx b/website/versioned_docs/version-0.24.0/policy/intro.mdx new file mode 100644 index 0000000000..c20571f99a --- /dev/null +++ b/website/versioned_docs/version-0.24.0/policy/intro.mdx @@ -0,0 +1,23 @@ +--- +title: Introduction +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Introduction + +## Policy + +Weave Policy Engine helps users have continuous security and compliance checks across their software delivery pipeline. The engine utilizes policy-as-code to guarantee security, resilience, and coding standards across applications and infrastructure. The engine comes with 100+ policies covering numerous security and compliance benchmarks like SOC2, GDPR, PCI-DSS, HIPAA, Mitre Attack and more. + +The policy engine provides the following functionality: + +### Admission Controller +An out-of-the-box admission controller that monitors any changes happening to the clusters' deployments and resources, and prevents violating changes at deployment time from being deployed to clusters. + +### Audit +Daily scans of your clusters' deployments and resources, then report back any policy violations. 
The audit results can be published to different data analytics tools to provide compliance posture analysis of your clusters' runtime.
+
+### Commit/Build Time Checks
+Early feedback on policy violations at commit or build time, by reporting them right inside Git or other CI tools. This helps developers and operators detect policy violations and fix them before they deploy their changes to the clusters.
diff --git a/website/versioned_docs/version-0.24.0/policy/policy-configuration.mdx b/website/versioned_docs/version-0.24.0/policy/policy-configuration.mdx
new file mode 100644
index 0000000000..522e089e8c
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/policy/policy-configuration.mdx
@@ -0,0 +1,294 @@
+---
+title: PolicyConfig
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# PolicyConfig
+
+## Goal
+
+Users sometimes need to enforce the same policies with different configurations (parameters) for different targets (workspaces, namespaces, applications, or resources).
+The `PolicyConfig` CRD allows you to do that without duplicating policies, by overriding the parameters of one or more policies for a specific target.
+
+## Schema
+
+The PolicyConfig CRD consists of two sections: 1) `match`, used to specify the target of this PolicyConfig, and 2) `config`, used to specify the policy parameters that will override the original policy parameters.
+
Expand to see a PolicyConfig example + + ```yaml + apiVersion: pac.weave.works/v2beta2 + kind: PolicyConfig # policy config resource kind + metadata: + name: my-config # policy config name + spec: + match: # matches (targets of the policy config) + workspaces: # add one or more name workspaces + - team-a + - team-b + config: # config for policies [one or more] + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 + ``` + +
+ +Each PolicyConfig CR can target either workspaces, namespaces, applications or resources. Targeting the same target explicitly in multiple PolicyConfigs is not allowed, ie: you can't use the same namespace in several PolicyConfigs which target namespaces. + +To target workspaces: + + ```yaml + match: + workspaces: + - team-a + - team-b + ``` + +To target namespaces: + + ```yaml + match: + namespaces: + - dev + - prod + ``` + +To target applications: + + ```yaml + match: + apps: # add one or more apps [HelmRelease, Kustomization] + - kind: HelmRelease + name: my-app # app name + namespace: flux-system # app namespace [if empty will match in any namespace] + ``` + +To target resources: + + ```yaml + match: + resources: # add one or more resources [Deployment, ReplicaSet, ..] + - kind: Deployment + name: my-deployment # resource name + namespace: default # resource namespace [if empty will match in any namespace] + ``` + + +Each PolicyConfig can override the parameters of one or more policies: + + ```yaml + config: # config for policies [one or more] + weave.policies.containers-minimum-replica-count: # the id of the policy + parameters: + replica_count: 3 + owner: owner-4 + weave.policies.containers-running-in-privileged-mode: + parameters: + privilege: true + ``` + +## Overlapping Targets + +While it's not possible to create PolicyConfigs that explicitly target the same targets, it can happen implicitly ex: by targeting a namespace in a PolicyConfig and targeting an application that exists in this namespace in another. +Whenever targets overlap, the narrower the scope of the PolicyConfig, the more precedence it has. Accordingly in the previous example, the configuration of the PolicyConfig targeting the application will have precedence over the PolicyConfig targeting the namespace. + +Those are the possible targets from lowest to highest precendence: +- PolicyConfig which targets a workspace. +- PolicyConfig which targets a namespace. +- PolicyConfig which targets an application in all namespaces. +- PolicyConfig which targets an application in a certain namespace. +- PolicyConfig which targets a kubernetes resource in all namespaces. +- PolicyConfig which targets a kubernetes resource in a specific namespace. + +**Note**: +- All configs are applied from low priority to high priority while taking into consideration the common parameters between configs. +- Each config only affects the parameters defined in it. + +### Example + +We have a Kustomization application `app-a` and deployment `deployment-1` which is part of this application. + +
Expand to see manifests + +```yaml +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-1 +spec: + match: + namespaces: + - flux-system + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 2 + owner: owner-1 +--- +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-2 +spec: + match: + apps: + - kind: Kustomization + name: app-a + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 +--- +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-3 +spec: + match: + apps: + - kind: Kustomization + name: app-a + namespace: flux-system + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 4 +--- +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-4 +spec: + match: + resources: + - kind: Deployment + name: deployment-1 + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 5 + owner: owner-4 +--- + +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-5 +spec: + match: + resources: + - kind: Deployment + name: deployment-1 + namespace: flux-system + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 6 +``` + +
+
+**In the above example, when you apply all 5 configurations**...
+
+- `app-a` will be affected by `my-config-5`. It will be applied on the policies defined in it, which will affect deployment `deployment-1` in namespace `flux-system`, as it matches the kind, name and namespace.
+
+  :::note
+  `deployment-1` deployed in a namespace other than `flux-system` won't be affected by this configuration.
+  :::
+
+  Final config values will be as follows:
+
+  ```yaml
+  config:
+    weave.policies.containers-minimum-replica-count:
+      parameters:
+        replica_count: 6 # from my-config-5
+        owner: owner-4 # from my-config-4
+  ```
+  - _For deployment `deployment-1` in namespace `flux-system`, `replica_count` must
+    be `>= 6`_
+  - _It will also be affected by `my-config-4` for the `owner` configuration
+    parameter `owner: owner-4`_
+
+
+**In the above example, when you apply `my-config-1`, `my-config-2`, `my-config-3` and `my-config-4`**
+
+- `my-config-4` will be applied on the policies defined in it, which will affect deployment `deployment-1` in all namespaces, as it matches the kind and name only.
+
+  Final config values will be as follows:
+
+  ```yaml
+  config:
+    weave.policies.containers-minimum-replica-count:
+      parameters:
+        replica_count: 5 # from my-config-4
+        owner: owner-4 # from my-config-4
+  ```
+
+  - _For deployment `deployment-1` in all namespaces, `replica_count` must be `>= 5`_
+  - _It will also be affected by `my-config-4` for the `owner` configuration
+    parameter `owner: owner-4`_
+
+**In the above example, when you apply `my-config-1`, `my-config-2` and `my-config-3`**
+
+- `my-config-3` will be applied on the policies defined in it, which will affect application `app-a` and all the resources in it in namespace `flux-system`, as it matches the kind, name and namespace.
+
+  :::note
+  `app-a` deployed in a namespace other than `flux-system` won't be affected by this configuration.
+  :::
+
+  Final config values will be as follows:
+
+  ```yaml
+  config:
+    weave.policies.containers-minimum-replica-count:
+      parameters:
+        replica_count: 4 # from my-config-3
+        owner: owner-1 # from my-config-1
+  ```
+
+  - _For application `app-a` and all the resources in it in namespace
+    `flux-system`, `replica_count` must be `>= 4`_
+  - _It will also be affected by `my-config-1` for the `owner` configuration
+    parameter `owner: owner-1`_
+
+**In the above example, when you apply `my-config-1` and `my-config-2`**
+
+- `my-config-2` will be applied on the policies defined in it, which will affect application `app-a` and all the resources in it in all namespaces, as it matches the kind and name only.
+
+  Final config values will be as follows:
+
+  ```yaml
+  config:
+    weave.policies.containers-minimum-replica-count:
+      parameters:
+        replica_count: 3 # from my-config-2
+        owner: owner-1 # from my-config-1
+  ```
+
+  - _For application `app-a` and all the resources in it in all namespaces,
+    `replica_count` must be `>= 3`_
+  - _It will also be affected by `my-config-1` for the `owner` configuration
+    parameter `owner: owner-1`_
+
+**In the above example, when you apply `my-config-1` only**
+
+- `my-config-1` will be applied on the policies defined in it, which will affect the namespace `flux-system` with all applications and resources in it, as it matches by namespace only.
+
+  Final config values will be as follows:
+
+  ```yaml
+  config:
+    weave.policies.containers-minimum-replica-count:
+      parameters:
+        replica_count: 2 # from my-config-1
+        owner: owner-1 # from my-config-1
+  ```
+
+  - _For any application or resource in namespace `flux-system`, `replica_count` must
+    be `>= 2`_
+  - _It will also be affected by `my-config-1` for the `owner` configuration
+    parameter `owner: owner-1`_
diff --git a/website/versioned_docs/version-0.24.0/policy/policy-set.mdx b/website/versioned_docs/version-0.24.0/policy/policy-set.mdx
new file mode 100644
index 0000000000..ba40e8ed71
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/policy/policy-set.mdx
@@ -0,0 +1,88 @@
+---
+title: PolicySet
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# PolicySet
+
+This is an optional custom resource that is used to select a group of policies to work in specific [modes](./weave-policy-profile.mdx#agent-modes).
+
+In each mode, the agent will list all the PolicySets of this mode, check which policies match any of those policysets, and then validate the resources against them.
+
+If there are no PolicySets found for a certain mode, all policies will be applied during this mode.
+
+> Note: [Tenant Policies](./policy.mdx#tenant-policy) are always active in the [Admission](#admission) mode, even if they are not selected in the `admission` policysets.
+
+**Example**
+```yaml
+apiVersion: pac.weave.works/v2beta2
+kind: PolicySet
+metadata:
+  name: my-policy-set
+spec:
+  mode: admission
+  filters:
+    ids:
+      - weave.policies.containers-minimum-replica-count
+    categories:
+      - security
+    severities:
+      - high
+      - medium
+    standards:
+      - pci-dss
+    tags:
+      - tag-1
+```
+
+PolicySets can be created for any of the three modes supported by the agent: `admission`, `audit`, and `tfAdmission`.
+
+
+## Grouping Policies
+
+Policies can be grouped by their ids, categories, severities, standards and tags.
+
+A policy will be applied if any of the filters are matched.
+
+
+## Migration from v2beta1 to v2beta2
+
+### New fields
+- The new required field `spec.mode` is added. PolicySets should be updated to set the mode.
+
+Previously, the agent was configured with which policysets to use in each mode. This argument has now been removed from the agent's configuration, and the mode is set on the PolicySet itself.
+
+#### Example of the agent configuration in versions older than v2.0.0
+
+```yaml
+# config.yaml
+admission:
+  enabled: true
+  policySet: admission-policy-set
+  sinks:
+    filesystemSink:
+      fileName: admission.txt
+```
+
+#### Example of a current PolicySet with the mode field
+
+```yaml
+apiVersion: pac.weave.works/v2beta2
+kind: PolicySet
+metadata:
+  name: admission-policy-set
+spec:
+  mode: admission
+  filters:
+    ids:
+      - weave.policies.containers-minimum-replica-count
+```
+
+
+### Updated fields
+- Field `spec.name` became optional.
+
+### Deprecated fields
+- Field `spec.id` is deprecated.
diff --git a/website/versioned_docs/version-0.24.0/policy/policy.mdx b/website/versioned_docs/version-0.24.0/policy/policy.mdx
new file mode 100644
index 0000000000..818e92ac58
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/policy/policy.mdx
@@ -0,0 +1,66 @@
+---
+title: Policy
+hide_title: true
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+import TierLabel from "../_components/TierLabel";
+
+# Policy
+
+## Policy CRD
+The Policy CRD is used to define policies, which are then consumed and used by the agent to validate entities.
+
+It uses the [OPA Rego Language](https://www.openpolicyagent.org/docs/latest/policy-language) to evaluate the entities.
+
+## Policy Library
+
+You should have a policy library repo set up which includes your policies as custom resources.
+
+:::info
+Enterprise customers should have access to fork the policy library repo into their own repositories.
+:::
+
+## Tenant Policy
+
+Tenant policies are special policies that are used by the [Multi Tenancy](https://docs.gitops.weave.works/docs/enterprise/multi-tenancy/) feature in [Weave GitOps Enterprise](https://docs.gitops.weave.works/docs/intro-ee/).
+
+Tenant policies have a special tag `tenancy`.
+
+
+## Mutating Resources
+
+Starting from version `v2.2.0`, the policy agent supports mutating resources.
+
+To enable mutating resources, policies must have the field `mutate` set to `true`, and the rego code should return the `violating_key` and the `recommended_value` in the violation response. The mutation webhook will use the `violating_key` and `recommended_value` to mutate the resource and return the new mutated resource.
+
+Example:
+
+```rego
+result = {
+    "issue_detected": true,
+    "msg": sprintf("Replica count must be greater than or equal to '%v'; found '%v'.", [min_replica_count, replicas]),
+    "violating_key": "spec.replicas",
+    "recommended_value": min_replica_count
+}
+```
+
+
+## Policy Validation
+
+The policy validation object is the result of validating an entity against a policy. It contains all the necessary information to give the user a clear idea of what caused this violation or compliance result.
+
+```yaml
+id: string # identifier for the violation
+account_id: string # organization identifier
+cluster_id: string # cluster identifier
+policy: object # contains related policy data
+entity: object # contains related resource data
+status: string # Violation or Compliance
+message: string # message that summarizes the policy validation
+type: string # the mode that produced this object. one of: Admission, Audit, TFAdmission
+trigger: string # what triggered the validation, e.g. a create request or the initial audit
+created_at: string # time at which the validation occurred
+```
diff --git a/website/versioned_docs/version-0.24.0/policy/releases.mdx b/website/versioned_docs/version-0.24.0/policy/releases.mdx
new file mode 100644
index 0000000000..722bbf8c72
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/policy/releases.mdx
@@ -0,0 +1,132 @@
+---
+title: Profile Releases
+hide_title: true
+---
+
+import TierLabel from "../_components/TierLabel";
+
+# Profile Releases
+
+
+## v0.6.5
+
+### Highlights
+
+- **Agent**
+  - Add support for mutating violating resources.
+
+### Dependency Versions
+
+- Policy Agent v2.2.0
+
+### Policy Library Compatibility
+
+Compatible with Policy Library versions:
+
+- v1.2.0
+
+Needs these [migration steps](./policy-set.mdx#migration-from-v2beta1-to-v2beta2) to be compatible with the following versions:
+
+- v1.1.0
+- v1.0.0
+- v0.4.0
+
+
+## v0.6.4
+
+### Highlights
+- **Agent**
+  - Add the PolicyConfig CRD to make it possible to customize policy configuration per namespace, application or resource
+  - Add a mode field to the policy set and add policy modes to its status
+  - Add policy modes to labels to support filtering
+  - Support backward compatibility for policy version v2beta1
+
+### Dependency Versions
+
+- Policy Agent v2.0.0
+
+### Policy Library Compatibility
+
+Compatible with Policy Library versions:
+
+- v1.2.0
+
+Needs these [migration steps](./policy-set.mdx#migration-from-v2beta1-to-v2beta2) to be compatible with the following versions:
+
+- v1.1.0
+- v1.0.0
+- v0.4.0
+
+
+## v0.6.3
+
+### Highlights
+- **Agent**
+  - Reference flux objects in violation events instead of the original resource object, to be able to list violations for a specific flux application
+
+### Dependency Versions
+
+- policy-agent 1.2.1
+
+### Policy Library Compatibility
+
+- v0.4.0
+- v1.0.0
+- v1.1.0
+
+## v0.6.2
+
+### Highlights
+- **Agent**
+  - Add Terraform mode to allow validating terraform plans
+  - Support targeting kubernetes HPA resources
+
+### Dependency Versions
+
+- policy-agent 1.2.0
+
+### Policy Library Compatibility
+
+- v0.4.0
+- v1.0.0
+- v1.1.0
+
+While both v0.4.0 and v1.0.0 are compatible with the agent, only v1.1.0 includes the modification needed to make the Controller Minimum Replica Count policy work with `horizontalpodautoscalers`.
+
+## v0.6.1
+
+### Highlights
+- **Agent**
+  - Make the audit interval configurable through `config.audit.interval`. It defaults to 24 hours.
+  - Add support for targeting certain flux resources (kustomizations, helmreleases and ocirepositories) in the admission mode.
+- **Profile**
+  - Add the ability to use an existing GitSource instead of creating a new one.
+
+
+### Dependency Versions
+
+- policy-agent 1.1.0
+
+### Policy Library Compatibility
+
+- v0.4.0
+- v1.0.0
+
+## v0.6.0
+
+### Highlights
+- **Agent**
+  - Configure the agent through a configuration file instead of arguments.
+  - Allow defining different validation sinks for audit and admission modes.
+  - Add the PolicySet CRD to the helm chart.
+- **Profile**
+  - Disable the default policy source.
+
+### Dependency Versions
+
+- policy-agent 1.0.0
+
+### Policy Library Compatibility
+
+- v0.4.0
+- v1.0.0
diff --git a/website/versioned_docs/version-0.24.0/policy/weave-policy-profile.mdx b/website/versioned_docs/version-0.24.0/policy/weave-policy-profile.mdx
new file mode 100644
index 0000000000..7a9a911751
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/policy/weave-policy-profile.mdx
@@ -0,0 +1,346 @@
+---
+title: Policy Profile
+hide_title: true
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+import TierLabel from "../_components/TierLabel";
+
+# Policy Profile
+
+## Overview
+
+The Weave policy profile provides policies to automate the enforcement of best practices and conventions. It ensures the compliance of workloads through the use of a policy agent, which provides an admission controller webhook that stops violating resources from being deployed to a cluster, and runs a daily audit that reports violating resources already deployed.
+
+The profile configuration contains two main sections: `policySource`, which configures the source for deploying policies, and `policy-agent`, which configures the policy agent.
+
Expand for an example of the profile values file + +```yaml +policy-agent: + failurePolicy: Ignore + + # If you don't want to use cert-manager, set useCertManager to false and provide your own certs + useCertManager: true + certificate: "" + key: "" + caCertificate: "" + + persistence: + enabled: false + # claimStorage: 1Gi + # sinkDir: /tmp + # storageClassName: standard + + config: + accountId: "" + clusterId: "" + + audit: + # Enable audit functionality + enabled: false + # sinks: + # # Enable writing violations as K8s events + # k8sEventsSink: + # enabled: true + + admission: + # Enable admission functionality + enabled: true + # mutate: true # enable mutating violating resources + sinks: + # Enable writing violations as K8s events + k8sEventsSink: + enabled: true + + +policySource: + enabled: false + # url: ssh://git@github.com/weaveworks/policy-library + # tag: v1.0.0 + # branch: + # path: ./ # Could be a path to the policies dir or a kustomization.yaml file + # secretRef: policy-library-auth # (Optional): Name of the K8s secret with private repo auth credentials + # sourceRef: # Could specify a name for an existing GitSource reference instead of creating a new one + # kind: GitRepository + # name: policy-library + # namespace: flux-system +``` + +
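+
+As a minimal sketch of how these values might be supplied, assuming the profile is installed as a Flux `HelmRelease` (the release name, chart name and Helm repository below are illustrative assumptions, not taken from this guide), the configuration could look like this:
+
+```yaml
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+  name: policy-agent               # illustrative release name
+  namespace: flux-system
+spec:
+  interval: 10m
+  chart:
+    spec:
+      chart: weave-policy-agent    # illustrative chart name; use the chart your profiles repository provides
+      sourceRef:
+        kind: HelmRepository
+        name: policy-profile-charts  # illustrative HelmRepository name
+        namespace: flux-system
+  values:
+    policy-agent:
+      config:
+        accountId: "my-account"    # replace with your own identifiers
+        clusterId: "my-cluster"
+        admission:
+          enabled: true
+          sinks:
+            k8sEventsSink:
+              enabled: true
+    policySource:
+      enabled: false
+```
+
+Any key from the values file above can be set under `spec.values` in the same way; the nesting mirrors the profile's values structure.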
+
+---
+## Policy Sources
+
+Policies are provided in the profile as Custom Resources. The agent reads the policies deployed on the cluster and runs them during each admission request or when auditing a resource.
+
+Policies are hosted in a policy library, which is usually a git repository. They are fetched in the profile through the use of `kustomize.toolkit.fluxcd.io.Kustomization`, which deploys the policies to the cluster.
+
+By default, all policies in the specified path are deployed. To specify which policies should be deployed from a library, a `kustomize.config.k8s.io.Kustomization` file should be defined in the repository.
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources: # specifies the path to each required policy
+  - policies/ControllerContainerAllowingPrivilegeEscalation/policy.yaml
+  - policies/ControllerContainerRunningAsRoot/policy.yaml
+  - policies/ControllerReadOnlyFileSystem/policy.yaml
+```
+
+The profile then needs to be configured with the necessary config to be able to reach the repository that is acting as a policy library.
+
+```yaml
+policySource:
+  enabled: true
+  url: URL of the repo where your policies exist
+  tag: tag name on the policies repo
+  path: Path to the policies dir - or a kustomization.yaml that selects some policies - in the repo
+  secretRef (if the repo is private): Name of the K8s secret with private repo credentials (leave empty if the repo is public)
+```
+
+There is the option of referencing an existing policy library source instead of creating a new one.
+```yaml
+policySource:
+  enabled: true
+  sourceRef:
+    kind: Kind of the existing source
+    name: Name of the policy library source
+    namespace: Namespace where the source exists
+```
+---
+## Policy Agent Configuration
+
+The `config` section is the single entry point for configuring the agent.
+
+The agent needs the following parameters to be provided in the configuration yaml file:
+
+- `accountId`: unique identifier that signifies the owner of that agent
+- `clusterId`: unique identifier for the cluster that the agent will run against
+
+The following optional parameters can also be provided:
+
+- `logLevel`: app log level (default: "info")
+- `probesListen`: address for the probes server to run on (default: ":9000")
+- `metricsAddress`: address the metric endpoint binds to (default: ":8080")
+
+### Agent Modes
+
+#### Admission
+
+This contains the admission module that enforces policies. It uses the `controller-runtime` Kubernetes package to register a callback that is called when the agent receives an admission request. Once called, the agent validates the received resource against the admission and tenant policies, and k8s uses the result of this validation to either allow or reject the creation/update of said resource.
+
+> Works with policies of provider `kubernetes`
+
+To enable admission control:
+
+```yaml
+policy-agent:
+  config:
+    admission:
+      enabled: true
+```
+
+Enabling the admission controller requires certificates for secure communication with the webhook client and the admission server. The best way to achieve this is by installing [cert manager](https://cert-manager.io/docs/installation/) and then configuring the profile as follows:
+
+```yaml
+policy-agent:
+  useCertManager: true
+```
+
+Cert manager can also be installed by adding the cert manager profile when creating the cluster.
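+
+Putting the two snippets above together, a minimal sketch of the relevant profile values for an admission-enabled agent that relies on cert-manager could look like the following (illustrative only; the full annotated values file is shown earlier on this page):
+
+```yaml
+policy-agent:
+  useCertManager: true    # let cert-manager issue the webhook certificates
+  config:
+    admission:
+      enabled: true
+      sinks:
+        k8sEventsSink:
+          enabled: true   # record admission results as Kubernetes events
+```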
+
+There is the option of providing previously generated certificates, although this is not recommended, and it is up to the user to manage them:
+
+```yaml
+policy-agent:
+  certificate: "---" # admission server certificate
+  key: "---" # admission server private key
+  caCertificate: "---" # CA bundle to validate the webhook server, used by the client
+```
+
+If the agent webhook cannot be reached or the request fails to complete, the corresponding request will be refused. To change that behavior and accept the request in case of failure, set the following:
+
+```yaml
+policy-agent:
+  failurePolicy: Ignore
+```
+
+#### Audit
+The audit functionality provides a full scan of the cluster(s) and reports back policy violations. This is usually used for policy violation reporting and compliance posture analysis against known benchmarks like PCI DSS, CIS, etc.
+
+> Works with policies of provider `kubernetes`
+
+To enable the audit functionality:
+
+```yaml
+policy-agent:
+  config:
+    audit:
+      enabled: true
+      interval: 24 # the frequency of audit operations, in hours (default is 24 hours)
+```
+
+The audit will be performed when the agent starts and then again periodically at an interval of your choice in hours (default is 24 hours). The results of the audit will be published to the configured [sink(s)](#policy-validation-sinks).
+
+#### Terraform Admission
+
+This is a webhook used to validate terraform plans. It is mainly used by the [TF-Controller](https://github.com/weaveworks/tf-controller) to enforce policies on terraform plans.
+
+> Works with policies of provider `terraform`
+
+To enable the terraform admission control:
+
+```yaml
+policy-agent:
+  config:
+    tfAdmission:
+      enabled: true
+```
+
+### Policy Validation Sinks
+
+When validating a resource, a [validation object](#policy-validation-sinks) is generated that contains information about the status of that validation and metadata about the resource and policy involved. These objects can be exported to be visible for users as a critical part of the audit flow, but can also be useful as logs for the admission scenario.
+
+By default, the agent only writes policy validations that violate a certain policy when performing an audit. To write compliance results as well, the following needs to be specified in the profile:
+
+```yaml
+policy-agent:
+  config:
+    audit:
+      writeCompliance: true
+```
+
+The agent profile supports storing the validations in different sinks. Multiple sinks can be used at the same time:
+
+
+
+
+
+The results will be dumped into a text file in the `logs` directory in the agent container, as a JSON string. It is important to note that this file will not be persisted and will be deleted upon pod restart, so generally this approach is not recommended for a production environment.
+
+To enable writing to a text file in audit scenario:
+
+```yaml
+policy-agent:
+  config:
+    audit:
+      sinks:
+        fileSystemSink:
+          fileName: "file.json"
+```
+
+To enable writing to a text file in admission scenario:
+
+```yaml
+policy-agent:
+  config:
+    admission:
+      sinks:
+        fileSystemSink:
+          fileName: "file.json"
+```
+
+It is possible to make the file persistent using the following configuration. This assumes that there is a [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) already configured on the cluster.
+ +```yaml +policy-agent: + persistence: + enabled: false # specifies whether to use persistence or not + claimStorage: 1Gi # claim size + storageClassName: standard # k8s StorageClass name +``` + + +The results will be written as Kubernetes events. This means that they are accessible through the kubernetes API and can be consumed by custom exporters. + +To enable writing Kubernetes events in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + k8sEventsSink: + enabled: true +``` + +To enable writing Kubernetes events in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + k8sEventsSink: + enabled: true +``` + + +This requires the cluster to be managed using flux. It makes use of the flux notification controller to send events to multiple sources, depending on the controller configuration. The agent writes the events to the controller and it proceeds to publish it to the configured listeners. + +To enable writing to flux notification controller in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + fluxNotificationSink: + address: "" +``` + +To enable writing to flux notification controller in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + fluxNotificationSink: + address: "" +``` + + +The results of validating entities against policies will be written to an Elasticsearch index. + +To enable writing to elasticsearch in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + elasticSink: + address: "" + username: "" + password: "" + indexName: "" + insertionMode: "upsert" +``` + +To enable writing to elasticsearch in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + elasticSink: + address: "" + username: "" + password: "" + indexName: "" + insertionMode: "insert" +``` + +We support the following insertion modes: + +- `insert`: doesn't update or delete any old records. The index will contain a log for all validation objects and give an insight of all the historical data. + +- `upsert`: updates the old result of validating an entity against a policy that happened on the same day. So the index will only contain the latest validation results for a policy and entity combination per day. + + + + diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops.md new file mode 100644 index 0000000000..266b700f38 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops.md @@ -0,0 +1,53 @@ +## gitops + +Weave GitOps + +### Synopsis + +Command line utility for managing Kubernetes applications via GitOps. + +### Examples + +``` + + # Get help for gitops create dashboard command + gitops create dashboard -h + gitops help create dashboard + + # Get the version of gitops along with commit, branch, and flux version + gitops version + + To learn more, you can find our documentation at https://docs.gitops.weave.works/ + +``` + +### Options + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + -h, --help help for gitops + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops beta](gitops_beta.md) - This component contains unstable or still-in-development functionality +* [gitops check](gitops_check.md) - Validates flux compatibility +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell +* [gitops create](gitops_create.md) - Creates a resource +* [gitops delete](gitops_delete.md) - Delete a resource +* [gitops get](gitops_get.md) - Display one or many Weave GitOps resources +* [gitops logs](gitops_logs.md) - Get logs for a resource +* [gitops remove](gitops_remove.md) - Remove various components of Weave GitOps +* [gitops replan](gitops_replan.md) - Replan a resource +* [gitops resume](gitops_resume.md) - Resume a resource +* [gitops run](gitops_run.md) - Set up an interactive sync between your cluster and your local file system +* [gitops set](gitops_set.md) - Sets one or many Weave GitOps CLI configs or resources +* [gitops suspend](gitops_suspend.md) - Suspend a resource +* [gitops version](gitops_version.md) - Display gitops version + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_beta.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_beta.md new file mode 100644 index 0000000000..9898dde169 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_beta.md @@ -0,0 +1,27 @@ +## gitops beta + +This component contains unstable or still-in-development functionality + +### Options + +``` + -h, --help help for beta +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops beta run](gitops_beta_run.md) - Set up an interactive sync between your cluster and your local file system + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_beta_run.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_beta_run.md new file mode 100644 index 0000000000..ed7d4c7ae8 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_beta_run.md @@ -0,0 +1,84 @@ +## gitops beta run + +Set up an interactive sync between your cluster and your local file system + +### Synopsis + +This will set up a sync between the cluster in your kubeconfig and the path that you specify on your local filesystem. If you do not have Flux installed on the cluster then this will add it to the cluster automatically. This is a requirement so we can sync the files successfully from your local system onto the cluster. Flux will take care of producing the objects for you. + +``` +gitops beta run [flags] +``` + +### Examples + +``` + +# Run the sync on the current working directory +gitops beta run . [flags] + +# Run the sync against the dev overlay path +gitops beta run ./deploy/overlays/dev + +# Run the sync on the dev directory and forward the port. +# Listen on port 8080 on localhost, forwarding to 5000 in a pod of the service app. +gitops beta run ./dev --port-forward port=8080:5000,resource=svc/app + +# Run the sync on the dev directory with a specified root dir. +gitops beta run ./clusters/default/dev --root-dir ./clusters/default + +# Run the sync on the podinfo demo. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./deploy/overlays/dev --no-session --timeout 3m --port-forward namespace=dev,resource=svc/backend,port=9898:9898 + +# Run the sync on the podinfo demo in the session mode. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./deploy/overlays/dev --timeout 3m --port-forward namespace=dev,resource=svc/backend,port=9898:9898 + +# Run the sync on the podinfo Helm chart, in the session mode. Please note that file Chart.yaml must exist in the directory. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./charts/podinfo --timeout 3m --port-forward namespace=flux-system,resource=svc/run-dev-helm-podinfo,port=9898:9898 +``` + +### Options + +``` + --allow-k8s-context strings The name of the KubeConfig context to explicitly allow. + --components strings The Flux components to install. (default [source-controller,kustomize-controller,helm-controller,notification-controller]) + --components-extra strings Additional Flux components to install, allowed values are image-reflector-controller,image-automation-controller. + --context string The name of the kubeconfig context to use + --dashboard-hashed-password string GitOps Dashboard password in BCrypt hash format + --dashboard-port string GitOps Dashboard port (default "9001") + --decryption-key-file string Path to an age key file used for decrypting Secrets using SOPS. 
+ --disable-compression If true, opt-out of response compression for all requests to the server + --flux-version string The version of Flux to install. (default "0.37.0") + -h, --help help for run + --no-bootstrap Disable bootstrapping at shutdown. + --no-session Disable session management. If not specified, the session will be enabled by default. + --port-forward string Forward the port from a cluster's resource to your local machine i.e. 'port=8080:8080,resource=svc/app'. + --root-dir string Specify the root directory to watch for changes. If not specified, the root of Git repository will be used. + --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-5c08e8e8-dirty") + --session-namespace string Specify the namespace of the session. (default "default") + --skip-dashboard-install Skip installation of the Dashboard. This also disables the prompt asking whether the Dashboard should be installed. + --skip-resource-cleanup Skip resource cleanup. If not specified, the GitOps Run resources will be deleted by default. + --timeout duration The timeout for operations during GitOps Run. (default 5m0s) +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops beta](gitops_beta.md) - This component contains unstable or still-in-development functionality + diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_check.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_check.md new file mode 100644 index 0000000000..e43812a66f --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_check.md @@ -0,0 +1,39 @@ +## gitops check + +Validates flux compatibility + +``` +gitops check [flags] +``` + +### Examples + +``` + +# Validate flux and kubernetes compatibility +gitops check + +``` + +### Options + +``` + -h, --help help for check +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion.md new file mode 100644 index 0000000000..d46ecac767 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion.md @@ -0,0 +1,36 @@ +## gitops completion + +Generate the autocompletion script for the specified shell + +### Synopsis + +Generate the autocompletion script for gitops for the specified shell. +See each sub-command's help for details on how to use the generated script. + + +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops completion bash](gitops_completion_bash.md) - Generate the autocompletion script for bash +* [gitops completion fish](gitops_completion_fish.md) - Generate the autocompletion script for fish +* [gitops completion powershell](gitops_completion_powershell.md) - Generate the autocompletion script for powershell +* [gitops completion zsh](gitops_completion_zsh.md) - Generate the autocompletion script for zsh + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_bash.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_bash.md new file mode 100644 index 0000000000..94c861eacf --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_bash.md @@ -0,0 +1,55 @@ +## gitops completion bash + +Generate the autocompletion script for bash + +### Synopsis + +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. 
+ +To load completions in your current shell session: + + source <(gitops completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + gitops completion bash > /etc/bash_completion.d/gitops + +#### macOS: + + gitops completion bash > $(brew --prefix)/etc/bash_completion.d/gitops + +You will need to start a new shell for this setup to take effect. + + +``` +gitops completion bash +``` + +### Options + +``` + -h, --help help for bash + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_fish.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_fish.md new file mode 100644 index 0000000000..342c8ea160 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_fish.md @@ -0,0 +1,46 @@ +## gitops completion fish + +Generate the autocompletion script for fish + +### Synopsis + +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + gitops completion fish | source + +To load completions for every new session, execute once: + + gitops completion fish > ~/.config/fish/completions/gitops.fish + +You will need to start a new shell for this setup to take effect. + + +``` +gitops completion fish [flags] +``` + +### Options + +``` + -h, --help help for fish + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_powershell.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_powershell.md new file mode 100644 index 0000000000..028132e477 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_powershell.md @@ -0,0 +1,43 @@ +## gitops completion powershell + +Generate the autocompletion script for powershell + +### Synopsis + +Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + gitops completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. + + +``` +gitops completion powershell [flags] +``` + +### Options + +``` + -h, --help help for powershell + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_zsh.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_zsh.md new file mode 100644 index 0000000000..df6caa3920 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_completion_zsh.md @@ -0,0 +1,57 @@ +## gitops completion zsh + +Generate the autocompletion script for zsh + +### Synopsis + +Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. 
You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(gitops completion zsh); compdef _gitops gitops + +To load completions for every new session, execute once: + +#### Linux: + + gitops completion zsh > "${fpath[1]}/_gitops" + +#### macOS: + + gitops completion zsh > $(brew --prefix)/share/zsh/site-functions/_gitops + +You will need to start a new shell for this setup to take effect. + + +``` +gitops completion zsh [flags] +``` + +### Options + +``` + -h, --help help for zsh + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create.md new file mode 100644 index 0000000000..73dd5b8f35 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create.md @@ -0,0 +1,49 @@ +## gitops create + +Creates a resource + +### Examples + +``` + +# Create a HelmRepository and HelmRelease to deploy Weave GitOps +gitops create dashboard ww-gitops \ + --password=$PASSWORD \ + --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml + +# Create a Terraform object +gitops create terraform my-resource \ + -n my-namespace \ + --source GitRepository/my-project \ + --path ./terraform \ + --interval 1m \ + --export > ./clusters/my-cluster/infra/terraform-my-resource.yaml + +``` + +### Options + +``` + --export Export in YAML format to stdout. + -h, --help help for create + --timeout duration The timeout for operations during resource creation. (default 3m0s) +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops create dashboard](gitops_create_dashboard.md) - Create a HelmRepository and HelmRelease to deploy Weave GitOps +* [gitops create terraform](gitops_create_terraform.md) - Create a Terraform object + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create_dashboard.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create_dashboard.md new file mode 100644 index 0000000000..ed85e4e8f5 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create_dashboard.md @@ -0,0 +1,48 @@ +## gitops create dashboard + +Create a HelmRepository and HelmRelease to deploy Weave GitOps + +### Synopsis + +Create a HelmRepository and HelmRelease to deploy Weave GitOps + +``` +gitops create dashboard [flags] +``` + +### Examples + +``` + +# Create a HelmRepository and HelmRelease to deploy Weave GitOps +gitops create dashboard ww-gitops \ + --password=$PASSWORD \ + --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for dashboard + --password string The password of the dashboard admin user. + --username string The username of the dashboard admin user. (default "admin") +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --export Export in YAML format to stdout. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + --timeout duration The timeout for operations during resource creation. 
(default 3m0s) +``` + +### SEE ALSO + +* [gitops create](gitops_create.md) - Creates a resource + diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create_terraform.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create_terraform.md new file mode 100644 index 0000000000..93ddcbdf35 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_create_terraform.md @@ -0,0 +1,53 @@ +## gitops create terraform + +Create a Terraform object + +### Synopsis + +Create a Terraform object + +``` +gitops create terraform [flags] +``` + +### Examples + +``` + +# Create a Terraform resource in the default namespace +gitops create terraform -n default my-resource --source GitRepository/my-project --path ./terraform --interval 15m + +# Create and export a Terraform resource manifest to the standard output +gitops create terraform -n default my-resource --source GitRepository/my-project --path ./terraform --interval 15m --export + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for terraform + --interval string Interval at which the Terraform configuration should be applied + --path string Path to the Terraform configuration + --source string Source of the Terraform configuration +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --export Export in YAML format to stdout. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + --timeout duration The timeout for operations during resource creation. (default 3m0s) + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops create](gitops_create.md) - Creates a resource + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_delete.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_delete.md new file mode 100644 index 0000000000..f3727cfd2e --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_delete.md @@ -0,0 +1,27 @@ +## gitops delete + +Delete a resource + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops delete terraform](gitops_delete_terraform.md) - Delete a Terraform object + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_delete_terraform.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_delete_terraform.md new file mode 100644 index 0000000000..f12ca592a9 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_delete_terraform.md @@ -0,0 +1,41 @@ +## gitops delete terraform + +Delete a Terraform object + +``` +gitops delete terraform [flags] +``` + +### Examples + +``` + +# Delete a Terraform resource in the default namespace +gitops delete terraform -n default my-resource + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for terraform +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops delete](gitops_delete.md) - Delete a resource + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get.md new file mode 100644 index 0000000000..edaca4db58 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get.md @@ -0,0 +1,40 @@ +## gitops get + +Display one or many Weave GitOps resources + +### Examples + +``` + +# Get the CLI configuration for Weave GitOps +gitops get config + +# Generate a hashed secret +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops get bcrypt-hash](gitops_get_bcrypt-hash.md) - Generates a hashed secret +* [gitops get config](gitops_get_config.md) - Prints out the CLI configuration for Weave GitOps + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get_bcrypt-hash.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get_bcrypt-hash.md new file mode 100644 index 0000000000..5b7326ed9b --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get_bcrypt-hash.md @@ -0,0 +1,39 @@ +## gitops get bcrypt-hash + +Generates a hashed secret + +``` +gitops get bcrypt-hash [flags] +``` + +### Examples + +``` + +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash + +``` + +### Options + +``` + -h, --help help for bcrypt-hash +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops get](gitops_get.md) - Display one or many Weave GitOps resources + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get_config.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get_config.md new file mode 100644 index 0000000000..1cf38434e1 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_get_config.md @@ -0,0 +1,37 @@ +## gitops get config + +Prints out the CLI configuration for Weave GitOps + +``` +gitops get config [flags] +``` + +### Examples + +``` + +# Prints out the CLI configuration for Weave GitOps +gitops get config +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops get](gitops_get.md) - Display one or many Weave GitOps resources + diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_logs.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_logs.md new file mode 100644 index 0000000000..76220c4e2d --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_logs.md @@ -0,0 +1,27 @@ +## gitops logs + +Get logs for a resource + +### Options + +``` + -h, --help help for logs +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops logs terraform](gitops_logs_terraform.md) - Get the runner logs of a Terraform object + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_logs_terraform.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_logs_terraform.md new file mode 100644 index 0000000000..9e903456e2 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_logs_terraform.md @@ -0,0 +1,41 @@ +## gitops logs terraform + +Get the runner logs of a Terraform object + +``` +gitops logs terraform [flags] +``` + +### Examples + +``` + +# Get the runner logs of a Terraform object in the "flux-system" namespace +gitops logs terraform --namespace flux-system my-resource + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for terraform +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops logs](gitops_logs.md) - Get logs for a resource + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_remove.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_remove.md new file mode 100644 index 0000000000..d121752fa6 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_remove.md @@ -0,0 +1,27 @@ +## gitops remove + +Remove various components of Weave GitOps + +### Options + +``` + -h, --help help for remove +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops remove run](gitops_remove_run.md) - Remove GitOps Run sessions + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_remove_run.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_remove_run.md new file mode 100644 index 0000000000..565fdfac90 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_remove_run.md @@ -0,0 +1,55 @@ +## gitops remove run + +Remove GitOps Run sessions + +### Synopsis + +Remove GitOps Run sessions + +``` +gitops remove run [flags] +``` + +### Examples + +``` + +# Remove the GitOps Run session "dev-1234" from the "flux-system" namespace +gitops remove run --namespace flux-system dev-1234 + +# Remove all GitOps Run sessions from the default namespace +gitops remove run --all-sessions + +# Remove all GitOps Run sessions from the dev namespace +gitops remove run -n dev --all-sessions + +# Clean up resources from a failed GitOps Run in no session mode +gitops remove run --no-session + +``` + +### Options + +``` + --all-sessions Remove all GitOps Run sessions + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for run + --no-session Remove all GitOps Run components in the non-session mode +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, 
the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops remove](gitops_remove.md) - Remove various components of Weave GitOps + diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_replan.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_replan.md new file mode 100644 index 0000000000..ec24bc69e5 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_replan.md @@ -0,0 +1,36 @@ +## gitops replan + +Replan a resource + +### Examples + +``` + +# Replan the Terraform plan of a Terraform object from the "flux-system" namespace +gitops replan terraform --namespace flux-system my-resource + +``` + +### Options + +``` + -h, --help help for replan +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops replan terraform](gitops_replan_terraform.md) - Trigger replan for a Terraform object + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_replan_terraform.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_replan_terraform.md new file mode 100644 index 0000000000..21e884fe72 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_replan_terraform.md @@ -0,0 +1,41 @@ +## gitops replan terraform + +Trigger replan for a Terraform object + +``` +gitops replan terraform [flags] +``` + +### Examples + +``` + +# Replan the Terraform plan of a Terraform object from the "flux-system" namespace +gitops replan terraform --namespace flux-system my-resource + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for terraform +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+```
+
+### SEE ALSO
+
+* [gitops replan](gitops_replan.md) - Replan a resource
+
+###### Auto generated by spf13/cobra on 24-May-2023
diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_resume.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_resume.md
new file mode 100644
index 0000000000..1f07bf5b59
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_resume.md
@@ -0,0 +1,36 @@
+## gitops resume
+
+Resume a resource
+
+### Examples
+
+```
+
+# Resume a Terraform object from the "flux-system" namespace
+gitops resume terraform --namespace flux-system my-resource
+
+```
+
+### Options
+
+```
+ -h, --help help for resume
+```
+
+### Options inherited from parent commands
+
+```
+ -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+```
+
+### SEE ALSO
+
+* [gitops](gitops.md) - Weave GitOps
+* [gitops resume terraform](gitops_resume_terraform.md) - Resume a Terraform object
+
+###### Auto generated by spf13/cobra on 24-May-2023
diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_resume_terraform.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_resume_terraform.md
new file mode 100644
index 0000000000..77becfade6
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_resume_terraform.md
@@ -0,0 +1,41 @@
+## gitops resume terraform
+
+Resume a Terraform object
+
+```
+gitops resume terraform [flags]
+```
+
+### Examples
+
+```
+
+# Resume a Terraform object in the "flux-system" namespace
+gitops resume terraform --namespace flux-system my-resource
+
+```
+
+### Options
+
+```
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+```
+
+### Options inherited from parent commands
+
+```
+ -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig.
Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops resume](gitops_resume.md) - Resume a resource + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_run.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_run.md new file mode 100644 index 0000000000..a609989ec7 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_run.md @@ -0,0 +1,84 @@ +## gitops run + +Set up an interactive sync between your cluster and your local file system + +### Synopsis + +This will set up a sync between the cluster in your kubeconfig and the path that you specify on your local filesystem. If you do not have Flux installed on the cluster then this will add it to the cluster automatically. This is a requirement so we can sync the files successfully from your local system onto the cluster. Flux will take care of producing the objects for you. + +``` +gitops run [flags] +``` + +### Examples + +``` + +# Run the sync on the current working directory +gitops beta run . [flags] + +# Run the sync against the dev overlay path +gitops beta run ./deploy/overlays/dev + +# Run the sync on the dev directory and forward the port. +# Listen on port 8080 on localhost, forwarding to 5000 in a pod of the service app. +gitops beta run ./dev --port-forward port=8080:5000,resource=svc/app + +# Run the sync on the dev directory with a specified root dir. +gitops beta run ./clusters/default/dev --root-dir ./clusters/default + +# Run the sync on the podinfo demo. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./deploy/overlays/dev --no-session --timeout 3m --port-forward namespace=dev,resource=svc/backend,port=9898:9898 + +# Run the sync on the podinfo demo in the session mode. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./deploy/overlays/dev --timeout 3m --port-forward namespace=dev,resource=svc/backend,port=9898:9898 + +# Run the sync on the podinfo Helm chart, in the session mode. Please note that file Chart.yaml must exist in the directory. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./charts/podinfo --timeout 3m --port-forward namespace=flux-system,resource=svc/run-dev-helm-podinfo,port=9898:9898 +``` + +### Options + +``` + --allow-k8s-context strings The name of the KubeConfig context to explicitly allow. + --components strings The Flux components to install. (default [source-controller,kustomize-controller,helm-controller,notification-controller]) + --components-extra strings Additional Flux components to install, allowed values are image-reflector-controller,image-automation-controller. + --context string The name of the kubeconfig context to use + --dashboard-hashed-password string GitOps Dashboard password in BCrypt hash format + --dashboard-port string GitOps Dashboard port (default "9001") + --decryption-key-file string Path to an age key file used for decrypting Secrets using SOPS. 
+ --disable-compression If true, opt-out of response compression for all requests to the server + --flux-version string The version of Flux to install. (default "0.37.0") + -h, --help help for run + --no-bootstrap Disable bootstrapping at shutdown. + --no-session Disable session management. If not specified, the session will be enabled by default. + --port-forward string Forward the port from a cluster's resource to your local machine i.e. 'port=8080:8080,resource=svc/app'. + --root-dir string Specify the root directory to watch for changes. If not specified, the root of Git repository will be used. + --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-5c08e8e8-dirty") + --session-namespace string Specify the namespace of the session. (default "default") + --skip-dashboard-install Skip installation of the Dashboard. This also disables the prompt asking whether the Dashboard should be installed. + --skip-resource-cleanup Skip resource cleanup. If not specified, the GitOps Run resources will be deleted by default. + --timeout duration The timeout for operations during GitOps Run. (default 5m0s) +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps + diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_set.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_set.md new file mode 100644 index 0000000000..9c27c5666b --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_set.md @@ -0,0 +1,35 @@ +## gitops set + +Sets one or many Weave GitOps CLI configs or resources + +### Examples + +``` + +# Enables analytics in the current user's CLI configuration for Weave GitOps +gitops set config analytics true +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+```
+
+### SEE ALSO
+
+* [gitops](gitops.md) - Weave GitOps
+* [gitops set config](gitops_set_config.md) - Set the CLI configuration for Weave GitOps
+
+###### Auto generated by spf13/cobra on 24-May-2023
diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_set_config.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_set_config.md
new file mode 100644
index 0000000000..2f0578db16
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_set_config.md
@@ -0,0 +1,37 @@
+## gitops set config
+
+Set the CLI configuration for Weave GitOps
+
+```
+gitops set config [flags]
+```
+
+### Examples
+
+```
+
+# Enables analytics in the current user's CLI configuration for Weave GitOps
+gitops set config analytics true
+```
+
+### Options
+
+```
+ -h, --help help for config
+```
+
+### Options inherited from parent commands
+
+```
+ -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+```
+
+### SEE ALSO
+
+* [gitops set](gitops_set.md) - Sets one or many Weave GitOps CLI configs or resources
+
diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_suspend.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_suspend.md
new file mode 100644
index 0000000000..9e25166dfb
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_suspend.md
@@ -0,0 +1,36 @@
+## gitops suspend
+
+Suspend a resource
+
+### Examples
+
+```
+
+# Suspend a Terraform object in the "flux-system" namespace
+gitops suspend terraform --namespace flux-system my-resource
+
+```
+
+### Options
+
+```
+ -h, --help help for suspend
+```
+
+### Options inherited from parent commands
+
+```
+ -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops suspend terraform](gitops_suspend_terraform.md) - Suspend a Terraform object + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_suspend_terraform.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_suspend_terraform.md new file mode 100644 index 0000000000..846e4ae144 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_suspend_terraform.md @@ -0,0 +1,41 @@ +## gitops suspend terraform + +Suspend a Terraform object + +``` +gitops suspend terraform [flags] +``` + +### Examples + +``` + +# Suspend a Terraform object in the "flux-system" namespace +gitops suspend terraform --namespace flux-system my-resource + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for terraform +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops suspend](gitops_suspend.md) - Suspend a resource + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_version.md b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_version.md new file mode 100644 index 0000000000..bbd2e9f269 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/cli-reference/gitops_version.md @@ -0,0 +1,30 @@ +## gitops version + +Display gitops version + +``` +gitops version [flags] +``` + +### Options + +``` + -h, --help help for version +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ -n, --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps + +###### Auto generated by spf13/cobra on 24-May-2023 diff --git a/website/versioned_docs/version-0.24.0/references/helm-reference.md b/website/versioned_docs/version-0.24.0/references/helm-reference.md new file mode 100644 index 0000000000..a091b3e92b --- /dev/null +++ b/website/versioned_docs/version-0.24.0/references/helm-reference.md @@ -0,0 +1,70 @@ +# Helm chart reference + + +This is a reference of all the configurable values in weave gitops's +helm chart. This is intended for customizing your installation after +you've gone through the [getting started](../getting-started/intro.mdx) guide. + +This reference was generated for the chart version 4.0.22 which installs weave gitops v0.24.0. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| adminUser.create | bool | `false` | Whether the local admin user should be created. If you use this make sure you add it to `rbac.impersonationResourceNames`. | +| adminUser.createClusterRole | bool | `true` | Specifies whether the clusterRole & binding to the admin user should be created. Will be created only if `adminUser.create` is enabled. Without this, the adminUser will only be able to see resources in the target namespace. | +| adminUser.createSecret | bool | `true` | Whether we should create the secret for the local adminUser. Will be created only if `adminUser.create` is enabled. Without this, we'll still set up the roles and permissions, but the secret with username and password has to be provided separately. | +| adminUser.passwordHash | string | `nil` | Set the password for local admin user. Requires `adminUser.create` and `adminUser.createSecret` This needs to have been hashed using bcrypt. You can do this via our CLI with `gitops get bcrypt-hash`. | +| adminUser.username | string | `"gitops-test-user"` | Set username for local admin user, this should match the value in the secret `cluster-user-auth` which can be created with `adminUser.createSecret`. Requires `adminUser.create`. | +| affinity | object | `{}` | | +| annotations | object | `{}` | Annotations to add to the deployment | +| envVars[0].name | string | `"WEAVE_GITOPS_FEATURE_TENANCY"` | | +| envVars[0].value | string | `"true"` | | +| envVars[1].name | string | `"WEAVE_GITOPS_FEATURE_CLUSTER"` | | +| envVars[1].value | string | `"false"` | | +| extraVolumeMounts | list | `[]` | | +| extraVolumes | list | `[]` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"ghcr.io/weaveworks/wego-app"` | | +| image.tag | string | `"v0.24.0"` | | +| imagePullSecrets | list | `[]` | | +| ingress.annotations | object | `{}` | | +| ingress.className | string | `""` | | +| ingress.enabled | bool | `false` | | +| ingress.hosts | string | `nil` | | +| ingress.tls | list | `[]` | | +| logLevel | string | `"info"` | What log level to output. 
Valid levels are 'debug', 'info', 'warn' and 'error' | +| metrics.enabled | bool | `false` | Start the metrics exporter | +| metrics.service.annotations | object | `{"prometheus.io/path":"/metrics","prometheus.io/port":"{{ .Values.metrics.service.port }}","prometheus.io/scrape":"true"}` | Annotations to set on the service | +| metrics.service.port | int | `2112` | Port to start the metrics exporter on | +| nameOverride | string | `""` | | +| networkPolicy.create | bool | `true` | Specifies whether default network policies should be created. | +| nodeSelector | object | `{}` | | +| oidcSecret.create | bool | `false` | | +| podAnnotations | object | `{}` | | +| podLabels | object | `{}` | | +| podSecurityContext | object | `{}` | | +| rbac.additionalRules | list | `[]` | If non-empty, these additional rules will be appended to the RBAC role and the cluster role. for example, additionalRules: - apiGroups: ["infra.contrib.fluxcd.io"] resources: ["terraforms"] verbs: [ "get", "list", "patch" ] | +| rbac.create | bool | `true` | Specifies whether the clusterRole & binding to the service account should be created | +| rbac.impersonationResourceNames | list | `[]` | If non-empty, this limits the resources that the service account can impersonate. This applies to both users and groups, e.g. `['user1@corporation.com', 'user2@corporation.com', 'operations']` | +| rbac.impersonationResources | list | `["users","groups"]` | Limit the type of principal that can be impersonated | +| rbac.viewSecretsResourceNames | list | `["cluster-user-auth","oidc-auth"]` | If non-empty, this limits the secrets that can be accessed by the service account to the specified ones, e.g. `['weave-gitops-enterprise-credentials']` | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| securityContext.allowPrivilegeEscalation | bool | `false` | | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.readOnlyRootFilesystem | bool | `true` | | +| securityContext.runAsNonRoot | bool | `true` | | +| securityContext.runAsUser | int | `1000` | | +| securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| serverTLS.enable | bool | `false` | Enable TLS termination in gitops itself. If you enable this, you need to create a secret, and specify the secretName. Another option is to create an ingress. | +| serverTLS.secretName | string | `"my-secret-tls"` | Specify the tls secret name. This type of secrets have a key called `tls.crt` and `tls.key` containing their corresponding values in base64 format. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets for more details and examples | +| service.annotations | object | `{}` | | +| service.create | bool | `true` | | +| service.port | int | `9001` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | +| tolerations | list | `[]` | | diff --git a/website/versioned_docs/version-0.24.0/releases.mdx b/website/versioned_docs/version-0.24.0/releases.mdx new file mode 100644 index 0000000000..08b30083dd --- /dev/null +++ b/website/versioned_docs/version-0.24.0/releases.mdx @@ -0,0 +1,549 @@ +--- +title: Releases +hide_title: true +--- +import TierLabel from "./_components/TierLabel"; + +# Releases + +:::info +This page details the changes for Weave GitOps Enterprise and its associated components. For Weave GitOps OSS - please see the release notes on [GitHub](https://github.com/weaveworks/weave-gitops/releases). +::: + +## v0.22.0 +2023-04-27 + + +### Highlights + +#### Explorer + +- Explorer supports now Flux sources. +- Applications UI and Sources UI could be configured to use Explorer backend to improve UI experience. +- Explorer collector uses impersonation. Ensure you [configure collector](../explorer/configuration/#authentication-and-authorization-for-collecting) for your leaf clusters. + +#### GitopsSets + +- Now supports correctly templating numbers and object chunks + +#### Cluster Bootstrapping + +- Don't wait for ControlPlane readiness to sync secrets, this allows secrets to be sync'd related to CNI or other early stage processes + +### Upgrade Notes (from the previous release) + +- Explorer: you should configure [collector service account](https://docs.gitops.weave.works/docs/explorer/configuration/#authentication-and-authorization-for-collecting) in your leaf clusters. + +### Known issues + +- Clusters page horizontally scrolls too much and status becomes unreadable for some fields + +### Dependency versions + +- weave-gitops v0.22.0 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.6.0 +- templates-controller v0.2.0 +- (optional) pipeline-controller v0.20.0 +- (optional) policy-agent v2.3.0 +- (optional) gitopssets-controller v0.9.0 + +## v0.21.2 +2023-04-13 + +### Highlights + +- See your gitopssets on leaf clusters in the UI +- Fixed bug where gitopssets would not update ConfigMaps +- View Open Pull requests button in the UI now allows you to select any GitRepository + +### Dependency versions + +- weave-gitops v0.21.2 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.5.0 +- templates-controller v0.1.4 +- (optional) pipeline-controller v0.20.0 +- (optional) policy-agent v2.3.0 +- (optional) gitopssets-controller v0.8.0 + +## v0.20.0 +2023-03-30 + +### Dependency versions + +- weave-gitops v0.20.0 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.5.0 +- templates-controller v0.1.4 +- (optional) pipeline-controller v0.20.0 +- (optional) policy-agent v2.3.0 +- (optional) gitopssets-controller v0.7.0 + +## v0.19.0 +2023-03-16 + +### Highlights + +#### UI + +- Gitopsssets come to the UI! 
+
+### Dependency versions
+
+- weave-gitops v0.19.0
+- cluster-controller v1.4.1
+- cluster-bootstrap-controller v0.3.0
+- templates-controller v0.1.4
+- (optional) pipeline-controller v0.20.0
+- (optional) policy-agent v2.3.0
+- (optional) gitopssets-controller v0.6.0
+
+## v0.18.0
+2023-03-02
+### Highlights
+
+#### UI
+
+- See the logged in user's OIDC groups in the UI via the new User Profile page
+- Image Automation pages now show cluster information
+- See details about the configured promotion strategy for a pipeline
+- Log filtering by source and level for GitOps Run
+- See all Policy Configs listed in the UI
+
+#### GitopsSets
+
+- New `cluster` generator allows you to interact with the Weave GitOps Cluster inventory. GitOps Clusters that are added to and removed from the inventory are reflected by the generator. This can be used, for example, to manage applications across a fleet of clusters.
+- Enhanced `gitRepository` generator can now scan directories and paths with the new `directory` option, which enables you to, for example, dynamically create Flux Kustomizations based on your repository.
+- New `apiClient` generator allows you to query an endpoint and provide data for your template.
+- Reconciliation metrics are now reported to the `/metrics` endpoint, ready to be collected
+
+
+### Dependency versions
+
+- weave-gitops v0.18.0
+- cluster-controller v1.4.1
+- cluster-bootstrap-controller v0.3.0
+- templates-controller v0.1.3
+- (optional) pipeline-controller v0.20.0
+- (optional) policy-agent v2.3.0
+- (optional) gitopssets-controller v0.5.0
+
+## v0.17.0
+2023-02-16
+### Highlights
+
+This release contains dependency upgrades and bug fixes. For a larger list of updates, check out the [Weave GitOps v0.17.0](https://github.com/weaveworks/weave-gitops/releases/tag/v0.17.0) release.
+
+## v0.16.0
+2023-02-02
+### Highlights
+
+#### Create External Secrets via WGE UI
+- It's becoming easier to create a new external secret CR through the UI instead of writing the whole CR YAML.
+- The creation form will help users choose which cluster to deploy the External Secret to and which secret store to sync the secrets from.
+- It's all done in the GitOps way.
+
+#### Plan Button in Terraform
+- Added an **Add Plan** button to the Terraform plan page to enable users to re-plan changes.
+
+### Dependency versions
+
+- weave-gitops v0.16.0
+- cluster-controller v1.4.1
+- cluster-bootstrap-controller v0.3.0
+- templates-controller v0.1.2
+- (optional) pipeline-controller v0.14.0
+- (optional) policy-agent v2.2.0
+- (optional) gitopssets-controller v0.2.0
+
+### Breaking changes
+
+No breaking changes
+
+## v0.15.1
+2023-01-19
+### Highlights
+
+#### Multi Repository support. Weave GitOps Enterprise adapts and scales to your repository structure
+- Weave GitOps Enterprise now supports selecting the Git Repository via the WGE GUI, enabling you to scale and match the desired Git Repository structure.
+
+#### GitOps Templates
+- Support for Profile paths, enabling you to set the path for profiles in the template to configure where in the directory the HelmRelease gets created.
+- Enhanced Enterprise CLI support for GitOps Templates.
+#### GitOps Templates CLI enhancements
+- Support for profiles in templates via CLI
+- ```gitops create template``` now supports ```--config```, which allows you to read command-line flags from a config file, and ```--output-dir```, which allows you to write files out to a directory instead of just stdout
+#### GitOpsSets in preview
+- GitOpsSets enable Platform Operators to have a single definition for an application for multiple environments and a fleet of clusters. A single definition can be used to generate the environment and cluster-specific configuration.
+- GitOpsSets has been released as a preview feature of WGE. The Preview phase helps us to actively collect feedback and use cases, iterating and improving the feature to reach a level of maturity before we call it stable. Please contact us via [email](mailto:david.stauffer@weave.works) or [slack](https://join.slack.com/t/weave-community/shared_invite/zt-1nrm7dc6b-QbCec62CJ7qj_OUOtuJbrw) if you want to get access to the preview.
+
+
+
+### Minor fixes
+#### OIDC
+- Allows customising the requested scopes via config.oidc.customScopes: "email,groups,something_else"
+- Token refreshing is now supported
+
+
+### Dependency versions
+
+- weave-gitops v0.15.0
+- cluster-controller v1.4.1
+- cluster-bootstrap-controller v0.3.0
+- (optional) pipeline-controller v0.9.0
+- (optional) policy-agent v2.2.0
+
+### Breaking changes
+
+No breaking changes
+
+## v0.14.1
+2023-01-05
+### Highlights
+
+#### Secrets management
+- We are introducing new functionality into Weave GitOps Enterprise to help observe and manage secrets through external secrets operator (ESO). The new secrets UI will enable customers using ESO to observe and manage external secrets, as well as help them troubleshoot issues during their secrets creation and sync operations. In this release, we are including the ability to list all ExternalSecrets custom resources across multi-cluster environments. Users also will have the ability to navigate to each ExternalSecret and know the details of the secret, its sync status, and the last time this secret has been updated, as well as the latest events associated with the secret.
+
+#### Pipelines
+- Retry promotion on failure. If a promotion fails there is now automatic retry functionality; you can configure the threshold and delay via the CLI.
+- Promotion webhook rate limiting. We now enable configuring the rate limit for the promotion webhooks.
+
+### Minor fixes
+#### Workspaces
+**[UI] "Tenant"** is renamed to "Workspace" on details page.
+
+**[UI] Use time.RFC3339** format for all timestamps of the workspaces tabs.
+
+#### Other
+**[UI] Error notification boundary** does not allow user to navigate away from the page.
+
+**[Gitops run] GitOps Run** doesn't ask to install dashboard twice
+
+### Dependency versions
+
+- weave-gitops v0.14.1
+- cluster-controller v1.4.1
+- cluster-bootstrap-controller v0.3.0
+- (optional) pipeline-controller v0.9.0
+- (optional) policy-agent v2.2.0
+
+### Breaking changes
+
+No breaking changes
+
+## v0.13.0
+2022-12-22
+### Highlights
+
+#### GitOps Templates Path feature
+- GitOps templates now provide the capability to write resources to multiple
+ paths in the Git repository. This feature allows complex scenarios, such as
+ creating a self-service for an application that requires an RDS database.
+ We've provided
+ [documentation](./gitops-templates/repo-rendered-paths.mdx) which has an example.
+ +```yaml +spec: + resourcetemplates: + - path: ./clusters/${CLUSTER_NAME}/definition/cluster.yaml + content: + - apiVersion: cluster.x-k8s.io/v1alpha4 + kind: Cluster + metadata: + name: ${CLUSTER_NAME} + ... + - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AWSCluster + metadata: + name: ${CLUSTER_NAME} + ... + - path: ./clusters/${CLUSTER_NAME}/workloads/helmreleases.yaml + content: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: ${CLUSTER_NAME}-nginx + ... + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: ${CLUSTER_NAME}-cert-manager + ... +``` + +#### Workspace UI +- Weave GitOps now provides a GUI for Workspaces. + +#### Enhanced Terraform Table in UI +- Weave GitOps now provides more details on the Terraform inventory GUI page. Adding the type and identifier fields to the inventory table, plus filtering and a 'no data' message. + +#### Keyboard shortcuts for "port forwards" on GitOps Run +- Weave GitOps now building and printing a list of set up port forwards. +- Weave GitOps now opening the selected port forward URL on key press. Listening for keypress is performed with the `github.com/mattn/go-tty` package (other options required pressing Enter after a keypress, this catches just a single numeric keypress) and opening URLs with the `github.com/pkg/browser` package. + +#### Minor fixes +**[UI] Notifications** Fixed provider page showing a 404. + +### Dependency versions + +- weave-gitops v0.13.0 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.3.0 +- (optional) pipeline-controller v0.8.0 +- (optional) policy-agent v2.2.0 + +### Breaking changes + +No breaking changes + +## v0.12.0 +2022-12-09 + +### Highlights + +**We highly recommend users of v0.11.0 upgrade to this version as it includes fixes for a number of UI issues.** + +#### GitOps Templates + +- Support to specify Helm charts inside the CRD, instead of annotations. We’ve + provided [documentation](./gitops-templates/profiles.mdx) which has a example. + +```yaml +spec: + charts: + items: + - chart: cert-manager + version: v1.5.3 + editable: false + required: true + values: + installCRDs: ${CERT_MANAGER_INSTALL_CRDS} + targetNamespace: cert-manager + layer: layer-1 + template: + content: + metadata: + labels: + app.kubernetes.io/name: cert-manager + spec: + retries: ${CERT_MANAGER_RETRY_COUNT} +``` + +- Ability to edit all fields now, including name/namespace + +#### Authentication with OIDC support +Supporting custom OIDC groups claims for azure/okta integration +Support for OIDC custom username and group claims: + +```yaml +config + oidc: + claimUsername: "" + claimGroups: "" +``` + +#### Policy commit-time agent +- Support Azure DevOps and auto-remediation in commit-time enforcement. + +#### Admin User- simpler RBAC +- Weave GitOps default admin user can now “read” all objects. Why is this important? As users are trying out Weave GitOps they will most likely try it out with some of their favorite Cloud Native tools such as Crossplane, Tekton, Istio, etc. This enables them to see all of those resources and explore the full power of Weave GitOps. We still do not recommend this user for “production-use” cases, and customers should always be pushed towards implementing OIDC with scoped roles. 
+ +#### Pipelines - adding Pipelines through Templates +- From the Pipelines view you can add new Pipelines in a way which leverages GitOpsTemplates, additionally - to help users configure these, we’ve provided [documentation](./pipelines/pipeline-templates.mdx) which has some samples. + +#### Support for multiple Flux instances on a single cluster +- Support for running multiple flux instances in different namespaces on a single cluster for resource isolation. + +#### Minor fixes + +**Terraform CRD Error** +Users of the Terraform Controller will be pleased to know we’ve addressed the issue where an error would be displayed if it had not been installed on all connected clusters. + +**Management cluster renaming** +If the name of the cluster where Weave GitOps Enterprise is installed, was changed from the default of management through the config.cluster.name parameter, certain workflows could fail such as fetching profiles, this has now been resolved. + +### Dependency versions​ +weave-gitops v0.12.0 +cluster-controller v1.4.1 +cluster-bootstrap-controller v0.3.0 +(optional) pipeline-controller v0.0.11 +(optional) policy-agent 2.1.1 + +### Known issues +- [UI] Notifications provider page shows a 404. + +## v0.11.0 +2022-11-25 + +### Highlights + +#### GitOpsTemplates +- We are working towards unifying CAPI and GitOps Templates under a single umbrella. For those already using CAPITemplates, we will ensure a smooth transition is possible by making use of a conversion hooks. There are some breaking changes for GitOpsTemplates as part of this transitionary period, so be sure to check the guidance under [Breaking Changes](#breaking-changes). +- We now retain the ordering of parameters in the template instead of sorting them alphabetically. Providing to the author control in what sequence the parameters are rendered in the form and thus present a more logically grouped set of parameters to the end consumer. +- You can control what + [delimiters](./gitops-templates/supported-langs.mdx#custom-delimiters) you + want to use in a template. This provides flexibility for if you want to use + the syntax for dynamic functions like the [helper functions](./gitops-templates/supported-langs.mdx#supported-functions-1) we support. + +#### Pipelines +- This [feature](pipelines/intro.mdx) is now enabled by default when you install the Weave GitOps Enterprise Helm Chart. You can toggle this with the `enablePipelines` flag. +- GitOpsTemplates are a highly flexible way to create new resources - including Pipelines. We now provide a shortcut on the Pipelines table view to navigate you to Templates with the `weave.works/template-type=pipeline` label. + +#### Telemetry +This release incorporates anonymous aggregate user behavior analytics to help us continuously improve the product. As an Enterprise customer, this is enabled by default. You can learn more about this [here](/feedback-and-telemetry#anonymous-aggregate-user-behavior-analytics). + +### Dependency versions +- weave-gitops v0.11.0 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.3.0 +- (optional) pipeline-controller v0.0.11 +- (optional) policy-agent 2.1.1 + +### Breaking changes + +#### GitOpsTemplates and CAPITemplates +We are making these changes to provide a unified and intuitive self-service experience within Weave GitOps Enterprise, removing misleading and potentially confusing terminology born from when only Clusters were backed by Templates. 
+ +**New API Group for the GitOpsTemplate CRD** +- old: `clustertemplates.weave.works` +- new: `templates.weave.works` + +After upgrading Weave GitOps Enterprise which includes the updated CRD: +1. Update all your GitOpsTemplates in Git changing all occurrences of `apiVersion: clustertemplates.weave.works/v1alpha1` to `apiVersion: templates.weave.works/v1alpha1`. +2. Commit, push and reconcile. They should now be viewable in the Templates view again. +3. Clean up the old CRD. As it stands: + - `kubectl get gitopstemplate -A` will be empty as it is pointing to the old `clustertemplates.weave.works` CRD. + - `kubectl get gitopstemplate.templates.weave.works -A` will work +To fix the former of the commands, remove the old CRD (helm does not do this automatically for safety reasons): + - `kubectl delete crd gitopstemplates.clustertemplates.weave.works` + - You may have to wait up to 5 minutes for your local kubectl CRD cache to invalidate, then `kubectl get gitopstemplate -A` should be working as usual + +**Template Profiles / Applications / Credentials sections are hidden by default** + +For both `CAPITemplates` and `GitopsTemplates` the default visibility for all sections in a template has been set to `"false"`. To re-enable profiles or applications on a template you can tweak the annotations + +```yaml +annotations: + templates.weave.works/profiles-enabled: "true" # enable profiles + templates.weave.works/kustomizations-enabled: "true" # enable applications + templates.weave.works/credentials-enabled: "true" # enable CAPI credentials +``` + +**The default values for a profile are not fetched and included in a pull-request** + +Prior to this release WGE would fetch the default values.yaml for every profile installed and include them in the `HelmReleases` in the Pull Request when rendering out the profiles of a template. + +This was an expensive operation and occasionally led to timeouts. + +The new behaviour is to omit the values and fall back to the defaults included in the helm-chart. This sacrifices some UX (being able to see all the defaults in the PR and tweak them) to improve performance. **There should not be any final behaviour changes to the installed charts**. + +You can still view and tweak the `values.yaml` when selecting profiles to include on the "Create resource (cluster)" page. If changes are made here the updated values.yaml will be included. + +## v0.10.2 +2022-11-15 + +### Highlights +- Retain template parameter ordering. +- Allow configuration of the delimiters in templates. +- Add create a pipeline button. +- add missing support for policy version v2beta2 to tenancy cmd. 
+ +### Dependency versions +- weave-gitops v0.10.2 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.3.0 +- (optional) policy-agent 2.1.1 + +## v0.10.1 +2022-11-10 + +### Highlights + +- Create non-cluster resources / Add Edit option to resources with create-request annotation +- bump pipeline-controller +- Parse annotations from template +- Add cost estimate message if available +- Adds support for showing policy modes and policy configs in the UI + +- Show suspended status on pipelines detail +- YAML view for Pipelines +- Align and link logo + +- Actually remove the watcher from the helm-watcher-cache +- UI 1817 disable create target name space if name space is flux system + +- Adding edit capi cluster resource acceptance test +- Add preview acceptance test + +### Dependency versions + +- weave-gitops v0.10.1 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.3.0 +- (optional) policy-agent 2.0.0 + + +## v0.9.6 +2022-10-17 + +### Highlights +- When adding applications, you can now preview the changes(PR) before creating a pull request +- You can now see included Cluster Profiles when previewing your Create Cluster PR +- Notifications are now available in the Notifications Page +- You can now automatically create namespace when adding applications + +### Dependency versions + +- weave-gitops v0.9.6 +- cluster-controller v1.3.2 +- cluster-bootstrap-controller v0.3.0 +- (optional) policy-agent 1.2.1 + +## v0.9.5 +2022-09-22 + +### Highlights +- **Tenancy** + - `gitops create tenant` now supports `--prune` to remove old resources from the cluster if you're not using `--export` with gitops. + - `deploymentRBAC` section in `tenancy.yaml` allows you to specify the permissions given to the flux `Kustomizations` that will apply the resources from git to your tenants' namespaces in the cluster. + - Support for `OCIRepository` sources when restricting/allowing the sources that can be applied into tenants' namespaces. +- **Templates** + - Templates now support helm functions for simple transformations of values: `{{ .params.CLUSTER_NAME | upper }}` + - Templates has moved to its own page in the UI, this is the first step in moving towards embracing them as a more generic feature, not just for cluster creation. + - If a version is not specified in a **template profile annotation** it can be selected by the user. + - A `namespace` can be specified in the **template profile annotation** that will be provided as the `HelmRelease`'s `targetNamespace` by default. +- **Bootstrapping** + - A ClusterBootstrapConfig can now optionally be triggered when `phase="Provisioned"`, rather than `ControlPlaneReady=True` status. + +### Dependency versions + +- weave-gitops v0.9.5 +- cluster-controller v1.3.2 +- cluster-bootstrap-controller v0.3.0 +- (optional) policy-agent 1.1.0 + +### Known issues + +- [UI] Notifications page shows a 404 instead of the notification-controller's configuration. + +### ⚠️ Breaking changes from v0.9.4 + +If using the policy-agent included in the weave-gitops-enterprise helm chart, the configuration should now be placed under the `config` key. 
+ +**old** +```yaml +policy-agent: + enabled: true + accountId: "my-account" + clusterId: "my-cluster" +``` + +**new** +```yaml +policy-agent: + enabled: true + config: + accountId: "my-account" + clusterId: "my-cluster" +``` diff --git a/website/versioned_docs/version-0.24.0/secrets/assets/sops-bootstrap-job.yaml b/website/versioned_docs/version-0.24.0/secrets/assets/sops-bootstrap-job.yaml new file mode 100644 index 0000000000..255b9525d1 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/assets/sops-bootstrap-job.yaml @@ -0,0 +1,68 @@ +apiVersion: capi.weave.works/v1alpha1 +kind: ClusterBootstrapConfig +metadata: + name: sops-installation + namespace: default +spec: + clusterSelector: + matchLabels: + weave.works/flux: "bootstrap" + jobTemplate: + generateName: "run-gitops-flux-{{ .ObjectMeta.Name }}" + spec: + containers: + - image: ghcr.io/fluxcd/flux-cli:v0.35.0 + imagePullPolicy: Always + name: flux-bootstrap + resources: {} + volumeMounts: + - name: kubeconfig + mountPath: "/etc/gitops" + readOnly: true + args: + [ + "bootstrap", + "github", + "--kubeconfig=/etc/gitops/value", + "--owner=", # to be changed + "--repository=", # to be changed + "--path=./clusters/{{ .ObjectMeta.Namespace }}/{{ .ObjectMeta.Name }}", + ] + envFrom: + - secretRef: + name: my-pat # github token secret for flux: see https://docs.gitops.weave.works/docs/cluster-management/getting-started/ + env: + - name: EXP_CLUSTER_RESOURCE_SET + value: "true" + - image: weaveworks/sops-bootstrap:0.1.0 + imagePullPolicy: Always + name: sops-bootstrap + resources: {} + volumeMounts: + - name: kubeconfig + mountPath: "/etc/gitops" + readOnly: true + command: ["bash", "/root/entrypoint.sh"] + envFrom: + - secretRef: + name: my-pat # github token secret for flux: see https://docs.gitops.weave.works/docs/cluster-management/getting-started/ + env: + - name: KEY_NAME + value: '{{ annotation "weave.works/sops-key-name" }}' + - name: KEY_COMMENT + value: '{{ annotation "weave.works/sops-key-comment" }}' + - name: SOPS_SECRET_REF + value: '{{ annotation "weave.works/sops-secret-ref" }}' + - name: SOPS_SECRET_REF_NAMESPACE + value: '{{ annotation "weave.works/sops-secret-ref-namespace" }}' + - name: PUSH_TO_GIT + value: '{{ annotation "weave.works/sops-push-to-git" }}' + - name: CLUSTER_NAME + value: "{{ .ObjectMeta.Name }}" + - name: CLUSTER_NAMESPACE + value: "{{ .ObjectMeta.Namespace }}" + restartPolicy: Never + volumes: + - name: kubeconfig + secret: + secretName: "{{ .ObjectMeta.Name }}-kubeconfig" diff --git a/website/versioned_docs/version-0.24.0/secrets/assets/template-annotations.yaml b/website/versioned_docs/version-0.24.0/secrets/assets/template-annotations.yaml new file mode 100644 index 0000000000..850195e8da --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/assets/template-annotations.yaml @@ -0,0 +1,7 @@ +# annotation to hold the kustomization values for cluster bootstrap job +weave.works/sops-kustomization: "${SOPS_KUSTOMIZATION_NAME}" +weave.works/sops-secret-ref: "${SOPS_SECRET_REF}" +weave.works/sops-secret-ref-namespace: "${SOPS_SECRET_REF_NAMESPACE}" +weave.works/sops-push-to-git: "${SOPS_PUSH_TO_GIT}" +weave.works/sops-key-name: "${SOPS_KEY_NAME}" +weave.works/sops-key-comment: "${SOPS_KEY_COMMENT}" diff --git a/website/versioned_docs/version-0.24.0/secrets/assets/template-params.yaml b/website/versioned_docs/version-0.24.0/secrets/assets/template-params.yaml new file mode 100644 index 0000000000..99d430d741 --- /dev/null +++ 
b/website/versioned_docs/version-0.24.0/secrets/assets/template-params.yaml @@ -0,0 +1,18 @@ +- name: SOPS_KUSTOMIZATION_NAME + required: true + description: This Kustomization will be used to decrypt SOPS secrets from this path `clusters/default/leaf-cluster/sops/` after reconciling on the cluster. example (`my-secrets`) +- name: SOPS_SECRET_REF + required: true + description: The private key secret name that will be generated by SOPS in the bootstrap job. example (`sops-gpg`) +- name: SOPS_SECRET_REF_NAMESPACE + required: true + description: The private key secret namespace this secret will be generated by SOPS in the bootstrap job. example (`flux-system`) +- name: SOPS_KEY_NAME + required: true + description: SOPS key name. This will be used to generate SOPS keys. example (`test.yourdomain.com`) +- name: SOPS_KEY_COMMENT + required: true + description: SOPS key comment. This will be used to generate SOPS keys. example (`sops secret comment`) +- name: SOPS_PUSH_TO_GIT + required: true + description: Option to push the public key to the git repository. expected values (`true`, `false`) diff --git a/website/versioned_docs/version-0.24.0/secrets/bootstraping-secrets.mdx b/website/versioned_docs/version-0.24.0/secrets/bootstraping-secrets.mdx new file mode 100644 index 0000000000..4553f73552 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/bootstraping-secrets.mdx @@ -0,0 +1,156 @@ +--- +title: Bootstrapping Secrets +hide_title: true +--- +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +import TierLabel from "./../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +When accessing protected resources there is a need for a client to authenticate before +the access is granted and the resource is consumed. For authentication, a client presents +credentials that are either created manually or available through infrastructure. A common scenario +is to have a secrets store. + +Weave Gitops allows you to provision the secret management infrastructure as part of its capabilities. +However, in order to provision, as any other application that has secrets, we need to create the secret needed for installing it. +This is known as a chicken-egg scenario that get addressed by providing an initial secret. This secret we call it +bootstrapping secret. + +Bootstrapping secrets comes in handy, not only while provisioning your secrets management solution, +but also in any platform provisioning task where the existence of the secret is a prerequisite. +Another common example could be provisioning platform capabilities via [profiles](../cluster-management/getting-started.mdx#profiles-and-clusters) +that are stored in [private repositories](https://fluxcd.io/flux/guides/helmreleases/#helm-repository-authentication-with-credentials). + +Weave Gitops provides [SecretSync](#secretsync) as a solution to managing your bootstrapping secrets. + +## SecretSync + + + +`SecretSync` is a [Kubernetes Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +that provides semantics to sync [Kuberentes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) from management cluster to leaf clusters. + +An example could be seen below: + +```yaml +apiVersion: capi.weave.works/v1alpha1 +kind: SecretSync +metadata: + name: my-dev-secret-syncer + namespace: default +spec: + clusterSelector: + matchLabels: + environment: dev + secretRef: + name: my-dev-secret + targetNamespace: my-namespace +``` +Where you could find the following configuration sections: + +1) Select the secret to sync: + +```yaml + secretRef: + name: my-dev-secret +``` + +2) Specify the [GitopsClusters](../cluster-management/managing-existing-clusters.mdx) +that the secret will be synced to via labels: + +```yaml + clusterSelector: + matchLabels: + environment: dev +``` + +`Secretsync` reconciles secrets on clusters: any cluster at a future time matching the label selector will have +the secret reconciled too. + +More info about the CRD spec [here](./spec/v1alpha1/secretSync.mdx) + +### Working with SecretSync + +#### Pre-requisites + +1. You are using [Weave Gitops Enterprise version > v0.19.0](../releases.mdx) +2. You have a set of GitopsClusters representing the clusters that you want to sync the secret to. They have a set of labels to allow matching. + +
Expand to see example + +```yaml +apiVersion: gitops.weave.works/v1alpha1 +kind: GitopsCluster +metadata: + namespace: flux-system + labels: + environment: dev +``` +
+ +3. You have created a secret that you want to sync from the management cluster. + +
Expand to see example + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: my-dev-secret + namespace: flux-system +type: Opaque +``` +
+ +:::info +Some restriction apply to the current version: +- Resources should be in the same namespace +- Leaf cluster nodes should be annotated with `node-role.kubernetes.io/control-plane` +::: + +#### Syncing secrets via SecretSync + +1. Create SecretSync manifests that points to your secret. For example: + +```yaml +apiVersion: capi.weave.works/v1alpha1 +kind: SecretSync +metadata: + name: my-dev-secret-syncer + namespace: default +spec: + clusterSelector: + matchLabels: + environment: dev + secretRef: + name: my-dev-secret + targetNamespace: my-namespace +``` + +2. Check-in to your configuration repo within your management cluster + +3. Create a PR, review and merge + +4. See the progress + +Once reconciled, the status section would show created secret resource version + +``` +status: + versions: + leaf-cluster-1: "211496" +``` + +5. See the secret created in your leaf cluster + +Your secret has been created in the target leaf cluster + +```bash +➜ kubectl get secret -n default +NAME TYPE DATA +my-dev-secret Opaque 1 +``` diff --git a/website/versioned_docs/version-0.24.0/secrets/getting-started.mdx b/website/versioned_docs/version-0.24.0/secrets/getting-started.mdx new file mode 100644 index 0000000000..124ec7894c --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/getting-started.mdx @@ -0,0 +1,141 @@ +--- +title: Getting Started +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import AlphaWarning from "../_components/_alpha_warning.mdx"; + +# Getting started with secrets management + + + +This guide shows you a basic experience to get started with Weave Gitops Secrets. +It covers the scenario of setting up the capability in a test environment and how to use it for your applications. + +## Requirements +- You have a test Weave Gitops Enterprise environment with Flux installed. +- You have a secret in AWS secrets manager. + +## Add the secrets infra + +In order to be able to manage external secrets stores and secrets, add `external-secrets` application from `weaveworks-charts` profiles repository. + +:::tip +[Here](../cluster-management/add-applications.mdx) you could refresh how to add applications. +::: + + +![add infra profile](imgs/getting-started-add-infra.png) + +Include via `values.yaml` the configuration to deploy the [SecretStore](https://external-secrets.io/v0.8.1/api/secretstore/) +connecting to AWS Secrets Manager. + +
Expand to see an example + +```yaml + values: + secretStores: + enabled: true + path: ./clusters/bases/secrets + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system +``` +This example points to the path `clusters/bases/secrets` in our configuration repo where a kustomization exists + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- aws-secrets-manager.yaml +``` + +With the AWS Secrets Manager secret store + +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: aws-secrets-manager + namespace: flux-system +spec: + provider: + aws: + auth: + secretRef: + accessKeyIDSecretRef: + key: access-key + name: awssm-secret + secretAccessKeySecretRef: + key: secret-access-key + name: awssm-secret + region: eu-north-1 + service: SecretsManager +``` +
+ +Review and merge the PR and see it available in your cluster + +![infra profile reconciled](imgs/getting-started-setup-infra.png) + +## Create the secret + +Given you have a secret in AWS Secrets Manager for example `test/search/db`. + +![aws secret](imgs/getting-started-secret-asm.png) + +Create the External Secret manifest via [Secrets UI](./manage-secrets-ui.mdx) to pull the secret from your store into your environment. + +![external secret](imgs/getting-started-create-secret-manifest.png) + +See it available in your cluster. + +![setup secret stores](imgs/getting-started-secret.png) + +## Use the secret + +At this stage you have everything you need for your application to [consume the secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-a-secret). +Add it to your application as usual. + +
Expand to see example + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: secret-dotfiles-pod +spec: + volumes: + - name: database-secrets + secret: + secretName: search-database + containers: + - name: dotfile-test-container + image: registry.k8s.io/busybox + command: + - ls + - "-l" + - "/etc/database-secrets" + volumeMounts: + - name: database-secrets + readOnly: true + mountPath: "/etc/database-secrets" +``` +
+ +You could see the expected secret available + +```bash +kubectl logs -f secret-dotfiles-pod + +total 0 +lrwxrwxrwx 1 root root 15 Apr 5 17:26 password -> ..data/password +``` + +## Next steps? + +- For other setup scenarios using external secrets, see [setup ESO](./setup-eso.mdx) +- For SOPS secrets, see [setup SOPS](./setup-sops.mdx) +- To discover the UI capabilities to manage secrets, see [here](./manage-secrets-ui.mdx) diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-1.png b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-1.png new file mode 100644 index 0000000000..e1f40008c8 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-1.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-2.png b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-2.png new file mode 100644 index 0000000000..fc2aab80c2 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-2.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-3.png b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-3.png new file mode 100644 index 0000000000..6ff80ddbec Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-3.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-sops.png b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-sops.png new file mode 100644 index 0000000000..493841d02e Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/create-secret-sops.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/eso-details-1.png b/website/versioned_docs/version-0.24.0/secrets/imgs/eso-details-1.png new file mode 100644 index 0000000000..4aed248f83 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/eso-details-1.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/external-secret-events-1.png b/website/versioned_docs/version-0.24.0/secrets/imgs/external-secret-events-1.png new file mode 100644 index 0000000000..a237efc441 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/external-secret-events-1.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-add-infra.png b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-add-infra.png new file mode 100644 index 0000000000..c185225a2c Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-add-infra.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-add-infra2.png b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-add-infra2.png new file mode 100644 index 0000000000..264f9bccc3 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-add-infra2.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-create-secret-manifest.png b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-create-secret-manifest.png new file mode 100644 index 0000000000..9892a5e9c6 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-create-secret-manifest.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-secret-asm.png 
b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-secret-asm.png new file mode 100644 index 0000000000..3d40bde3d1 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-secret-asm.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-secret.png b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-secret.png new file mode 100644 index 0000000000..013f1ca49a Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-secret.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-setup-config.png b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-setup-config.png new file mode 100644 index 0000000000..eae866dbb2 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-setup-config.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-setup-infra.png b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-setup-infra.png new file mode 100644 index 0000000000..a6f2d61b40 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/getting-started-setup-infra.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/secretes-overview-1.png b/website/versioned_docs/version-0.24.0/secrets/imgs/secretes-overview-1.png new file mode 100644 index 0000000000..1c3c5cd03a Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/secretes-overview-1.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/secrets-overview-2.png b/website/versioned_docs/version-0.24.0/secrets/imgs/secrets-overview-2.png new file mode 100644 index 0000000000..1b54cbc84f Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/secrets-overview-2.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/sops-secret-pr.png b/website/versioned_docs/version-0.24.0/secrets/imgs/sops-secret-pr.png new file mode 100644 index 0000000000..2b1f9e0069 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/sops-secret-pr.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/imgs/sops.png b/website/versioned_docs/version-0.24.0/secrets/imgs/sops.png new file mode 100644 index 0000000000..c5e635edd1 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/secrets/imgs/sops.png differ diff --git a/website/versioned_docs/version-0.24.0/secrets/intro.mdx b/website/versioned_docs/version-0.24.0/secrets/intro.mdx new file mode 100644 index 0000000000..393851f3f5 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/intro.mdx @@ -0,0 +1,35 @@ +--- +title: Overview +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; + +

+ {frontMatter.title} +

+
+## Secrets Management
+
+Secrets are sensitive information such as passwords, access keys, and other credentials that should not be exposed publicly. In cloud-native applications, secrets are often used to authenticate and authorize access to various resources, such as databases, APIs, and other services.
+
+In a GitOps environment, secrets are typically stored either encrypted in Git, or as Custom Resources that reference the secret in an external secret store. Secrets are then synced into the clusters and securely passed to the application containers or workloads.
+
+Effective secrets management in cloud-native applications and GitOps environments is critical for maintaining the security and compliance of the overall system. Best practices include regularly rotating secrets, using strong encryption and access controls, and implementing robust auditing and monitoring processes.
+
+## Weave GitOps Secrets Management
+
+Weave GitOps Secrets Management is a set of features that makes it easier for teams to manage secrets in a GitOps environment across multiple clusters. These features provide an automated way to manage secrets effectively, and make it easier for different personas to work with secrets.
+
+Developers can use Weave GitOps Secrets Management to securely create and track application secrets such as API keys, passwords, and other credentials, directly from the Weave GitOps UI in a self-serve manner.
+
+Operations teams can use Weave GitOps Secrets Management to set up secure and reliable flows for developers to create and consume secrets for their applications.
+
+Weave GitOps Secrets Management supports integrations with SOPS and External Secrets Operator (ESO) to provide a secure and automated way to manage secrets in a GitOps environment, while giving customers the option to choose either of these secrets operators, or to work with both of them.
+
+For the SOPS and ESO operators, Weave GitOps provides ways to do the following:
+* Setup Secrets Operators ([SOPS](./setup-sops.mdx) | [ESO](./setup-eso.mdx))
+* [Bootstrap Secrets into clusters](./bootstraping-secrets.mdx)
+* [Manage Secrets through Weave GitOps UI](./manage-secrets-ui.mdx)
+
+To get started with Weave GitOps Secrets Management, please follow the guide [here](./getting-started.mdx).
diff --git a/website/versioned_docs/version-0.24.0/secrets/manage-secrets-ui.mdx b/website/versioned_docs/version-0.24.0/secrets/manage-secrets-ui.mdx
new file mode 100644
index 0000000000..9c03817f6e
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/secrets/manage-secrets-ui.mdx
@@ -0,0 +1,124 @@
+---
+title: Manage Secrets UI
+hide_title: true
+---
+
+import TierLabel from "./../_components/TierLabel";
+

+ {frontMatter.title} +

+
+At Weave GitOps Enterprise (WGE), we support two approaches for creating and managing secrets: [External Secrets Operator](https://external-secrets.io/v0.8.1/) and [Mozilla SOPS](https://fluxcd.io/flux/guides/mozilla-sops/). In this guide, we will provide an overview of both approaches and explain how to use the UI to create and manage secrets.
+
+Clicking on Secrets under the Platform section in the left-hand menu will bring you to the Secrets page, where you can create external secrets and SOPS secrets, and view the list of external secrets.
+
+## External Secrets
+
+### Prerequisites
+
+Set up the External Secrets Operator by following [this](./setup-eso.mdx) guide.
+
+### Create External Secret CR
+
+To create a new `ExternalSecret` CR, start by clicking the `Create External Secret` button to navigate to the creation page.
+
+![Secret list](./imgs/secretes-overview-1.png)
+
+![Create new Secret](./imgs/create-secret-1.png)
+
+Here, you will be prompted to enter the `External Secret Name` and the `Target K8s Secret Name`. Once you choose the `Target Cluster`, you will find a new list of all the `Secret Stores` on this cluster to choose from.
+
+It's important to note that the chosen `SecretStore` may be either cluster-scoped (i.e. a `ClusterSecretStore`) or namespace-scoped.
+
+![Create new Secret](./imgs/create-secret-2.png)
+
+If you choose a namespace-scoped `SecretStore`, the new secret will be created in the same namespace as the `SecretStore`.
+
+![Create new Secret](./imgs/create-secret-3.png)
+
+If you choose a cluster-scoped `ClusterSecretStore`, the new secret will be created in a namespace of your choice.
+
+This process allows you to easily create new `ExternalSecret` CRs without needing to manually create them through YAML files or command-line tools.
+
+### List External Secrets
+
+![Secrets list](./imgs/secrets-overview-2.png)
+
+The ExternalSecrets List section of the UI allows you to view all the external secrets that are currently stored in your Kubernetes clusters. This section provides an overview of each external secret, including its name, namespace, cluster, Kubernetes secret, secret store, and age. From this page, you can also navigate to the details page to view more information about a specific secret.
+
+### External Secret Details
+
+![External Secret Details](./imgs/eso-details-1.png)
+
+The details page displays the details of a specific external secret, including its name, namespace, data, and creation date. Below are the details that you can expect to see on this page:
+
+- **Status:** This indicates the current status of the external secret, which can be "Ready" or "Not Ready" depending on whether the external secret has been successfully created and is ready for use.
+- **Last Updated:** This shows the date and time when the external secret was last updated.
+- **External Secret:** This is the name of the external secret that you are viewing.
+- **K8s Secret:** This is the name of the Kubernetes secret that is associated with the external secret.
+- **Cluster:** This indicates which cluster the external secret exists on.
+- **Secret Store:** This shows the name of the secret store provider that is being used to store the external secret.
+- **Secret Store Type:** This indicates the type of secret store that is being used to store the external secret. In the example shown, the type is "AWS Secrets Manager".
+- **Secret path:** This is the path to the external secret within the secret store.
+- **Property:** This is the property or key that is associated with the external secret. +- **Version:** This shows the version of the external secret, which may be blank if no version has been specified. + +Understanding the information provided on the details page can help you to manage and troubleshoot your external secrets as needed. + +### Understanding Events + +![External Secret Events](./imgs/external-secret-events-1.png) + +The following events can be expected when using the UI to manage external secrets: + +- **Updated:** This event indicates that an existing external secret has been successfully updated with new data. +- **Not Ready:** This event indicates that there was an issue with the secret store when trying to access or synchronize external secrets. This includes situations such as the secret store being unavailable or not ready to handle requests, or issues with authentication when trying to access the secret store. + +Understanding these events can help you to troubleshoot issues that may arise when managing external secrets using the UI. In particular, if you encounter a `Not Ready` event, you may need to check your secret store credentials and ensure that the secret store is operational before proceeding with any further actions. + +## SOPS + +### Getting Started with SOPS + +Creating a [SOPS](https://github.com/mozilla/sops#usage) secret involves using the SOPS tool to encrypt a file containing sensitive information, such as credentials or API keys. This encrypted file can then be stored securely in version control or another location, with only authorized users able to decrypt it using their own private key. This adds an additional layer of security to sensitive data, reducing the risk of unauthorized access or accidental exposure. + +### Prerequisites + +For more information about how to generate OpenPGP/age keys and configure your cluster to work with Weave GitOps Enterprise secrets management follow [this](./setup-sops.mdx) guide. + +### Create SOPS Secret + +To create a new SOPS secret, start by clicking on the `Create Sops Secret` button. + +![Secrets Overview](./imgs/secretes-overview-1.png) + +This will open the create form where you can specify the details of your new secret. First, choose the `Cluster` where you want to create the secret. Then, enter a `name` for your secret and select the `namespace` where it will be created. + +![Create Secret SOPS](./imgs/create-secret-sops.png) + +Next, select the `encryption method` that you want to use - currently, only GPG/AGE encryption is supported. Finally, choose the `kustomization` that will be used by SOPS to decrypt the secret, as well as, having the public key info that will be used to encrypt the secret data. Afterwards, add your `key-value` pairs of your secrets. +It's important to note that the `value` input will be encoded to base64. + +The generated secret should be something like below. + +![Create Secret SOPS PR](./imgs/sops-secret-pr.png) + +After approving the pull request, Flux will reconcile it to your cluster. 
To verify that the secret has been successfully created, you can use the following command to retrieve it as YAML: + +```bash +kubectl get secret secretTest-default-sops-secret -n default -o yaml +``` + +which will give the following output: + +```yaml +apiVersion: v1 +data: + secret-1: dmFsCg== +kind: Secret +metadata: + name: secretTest-default-sops-secret + namespace: default +type: Opaque +``` diff --git a/website/versioned_docs/version-0.24.0/secrets/setup-eso.mdx b/website/versioned_docs/version-0.24.0/secrets/setup-eso.mdx new file mode 100644 index 0000000000..dd348a3990 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/setup-eso.mdx @@ -0,0 +1,74 @@ +--- +title: Setup ESO +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +

+ {frontMatter.title} +

+
+Weave GitOps Enterprise now supports managing secrets using [External Secrets Operator](https://external-secrets.io/v0.8.1/) from the [UI](./manage-secrets-ui.mdx#external-secrets). External Secrets Operator is a Kubernetes operator that allows users to consume secrets from external secrets management systems by reading their information using external APIs and injecting their values into Kubernetes secrets. To be able to use this functionality, users need to configure their External Secrets Operator and SecretStores using one of the guides below.
+
+## Prerequisites
+
+### SecretStores
+
+You should have your [SecretStore CRs](https://external-secrets.io/v0.8.1/) defined in a git repository. Those CRs will be installed to your cluster in the following steps and used by the creation UI.
+
+### ESO Profile
+
+The [ESO profile](https://github.com/weaveworks/weave-gitops-profile-examples/tree/main/charts/external-secrets) is packaged with the [weaveworks-charts](https://github.com/weaveworks/weave-gitops-profile-examples). If you have the usual profiles set up, you will not need to do anything extra.
+This profile installs the ESO controller, all the required CRDs, and the SecretStore CRs defined in the [previous](./#secretstores) step.
+
+### Secrets
+
+There are several Kubernetes Secrets that need to exist on your management cluster for the whole flow to work.
+
+If your SecretStores repository is private, you'll need a Secret that contains the repo credentials to access the repository. This is usually the [Secret](../cluster-management/getting-started.mdx#add-a-cluster-bootstrap-config) you created while bootstrapping Flux on the management cluster, and it is copied to your leaf cluster during creation.
+
+For each SecretStore CR, you'll need to add a Secret that follows the format expected by this CR, to allow the operator to access the defined External Secret Store.
+
+Follow this [guide](/secrets/bootstraping-secrets.mdx) for bootstrapping those secrets on leaf clusters.
+
+## Installation Steps
+
+### Install ESO on management cluster or existing leaf cluster
+
+To install the ESO profile on an existing cluster, use `Add an application` from the `Applications` page and select `external-secrets` from `weaveworks-charts`. Check the [Profile values](./#profile-values) section for more info about configuring the `values.yaml`.
+
+### Install ESO on leaf cluster
+
+To bootstrap the ESO profile on a leaf cluster, select `external-secrets` from the profiles dropdown in the `Create Cluster` page. Check the [Profile values](./#profile-values) section for more info about configuring the `values.yaml`.
+
+### Profile values
+
+You should then configure the `values.yaml` to install the `SecretStores` on the cluster from a `GitRepository`.
+This is done by configuring the `secretStores` section.
+
Expand to see an example that creates a new git source from a specific tag + +```yaml +secretStores: + enabled: true + url: ssh://git@github.com/github-owner/repo-name # url for the git repository that contains the SecretStores + tag: v1.0.0 + path: ./ # could be a path to the secrets dir or a kustomization.yaml file for the SecretStore in the GitRepository + secretRef: my-pat # the name of the Secret containing the repo credentials for private repositories +``` +
+ +
Expand to see an example that uses an existing git source + +```yaml +secretStores: + enabled: true + sourceRef: # Specify the name for an existing GitSource reference + kind: GitRepository + name: flux-system + namespace: flux-system +``` +
diff --git a/website/versioned_docs/version-0.24.0/secrets/setup-sops.mdx b/website/versioned_docs/version-0.24.0/secrets/setup-sops.mdx new file mode 100644 index 0000000000..b8f89ed24a --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/setup-sops.mdx @@ -0,0 +1,363 @@ +--- +title: Setup SOPS +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; +import CodeBlock from "@theme/CodeBlock"; + +import SopsBootstrapJob from "!!raw-loader!./assets/sops-bootstrap-job.yaml"; +import TemplateParams from "!!raw-loader!./assets/template-params.yaml"; +import TemplateAnnotations from "!!raw-loader!./assets/template-annotations.yaml"; + +

+ {frontMatter.title} +

+
+Weave GitOps Enterprise now supports managing secrets from the [UI](./manage-secrets-ui.mdx#sops) using SOPS, a tool that encrypts and decrypts secrets using various key management services. To be able to use this functionality, users need to configure their private and public key pairs using one of the guides below.
+
+## Setup SOPS on management cluster or existing leaf cluster
+
+In this section, we will cover the prerequisites for using [SOPS](https://github.com/mozilla/sops) with Weave GitOps Enterprise, and how to configure SOPS for your existing Kubernetes cluster to work with GPG and age keys.
+
+For a more advanced SOPS setup with Flux, please refer to this [guide](https://fluxcd.io/flux/guides/mozilla-sops/).
+
+### Encrypting secrets using GPG/OpenPGP
+
+OpenPGP is one way of using SOPS to encrypt and decrypt secrets with Weave GitOps Enterprise.
+
+Here are the steps to generate an OpenPGP key and configure your cluster to work with Weave GitOps Enterprise secrets management.
+
+1- Generate a GPG key pair
+
Expand for instructions + +```bash +export KEY_NAME="gpg-key" +export KEY_COMMENT="gpg key" + +gpg --batch --full-generate-key < + +2- Export the key pairs fingerprint in the shell + +```bash +gpg --list-secret-keys "${KEY_NAME}" + +sec rsa4096 2020-09-06 [SC] + 710DC0DB6C1662F707095FC30233CB21E656A3CB + +export KEY_FP="710DC0DB6C1662F707095FC30233CB21E656A3CB" +``` + +3- Export the generated private key to a kubernetes secret `sops-gpg-private-key` which will be used by flux's kustomize-controller to decrypt the secrets using sops. + +```bash +gpg --export-secret-keys --armor "${KEY_FP}" | +kubectl create secret generic sops-gpg-private-key \ +--namespace=flux-system \ +--from-file=sops.asc=/dev/stdin +``` + +4- Export the generated public key to a kubernetes secret `sops-gpg-public-key` which will be used by Weave GitOps Enterprise to encrypt the secrets created from the UI. + +```bash +gpg --export --armor "${KEY_FP}" | +kubectl create secret generic sops-gpg-public-key \ +--namespace=flux-system \ +--from-file=sops.asc=/dev/stdin +``` + +:::tip + It's recommended to remove the secret from your machine + +```bash +gpg --delete-secret-keys "${KEY_FP}" +``` +::: + +5- Create a kustomization for reconciling the secrets on the cluster and set the `--decryption-secret` flag to the name of the private key created in step 3. + +```bash +flux create kustomization gpg-secrets \ +--source=secrets \ # the git source to reconcile the secrets from +--path=./secrets/gpg \ +--prune=true \ +--interval=10m \ +--decryption-provider=sops \ +--decryption-secret=sops-gpg-private-key +``` + +6- Annotate the kustomization object created in the previous step with the name and namespace of the public key created in step 4. + +```bash +kubectl annotate kustomization gpg-secrets \ +sops-public-key/name=sops-gpg-public-key \ +sops-public-key/namespace=flux-system \ +-n flux-system +``` + +
Expand to see the expected kustomization object + +```yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: gpg-secrets + namespace: flux-system + annotations: + sops-public-key/name: sops-gpg-public-key + sops-public-key/namespace: flux-system +spec: + interval: 10m + sourceRef: + kind: GitRepository + name: secrets + path: ./secrets/gpg + decryption: + provider: sops + secretRef: + name: sops-gpg-private-key + prune: true + validation: server +``` + +
+ +:::note +This is an essential step in order to allow other operators and developers to utilize WeaveGitOps UI to encrypt SOPS secrets using the public key secret in the cluster. +::: + +### Encrypting secrets using age + +[age](https://github.com/FiloSottile/age) is a simple, modern and secure file encryption tool, that can be used to encrypt secrets using Weave GitOps Enterprise. + +Here are the steps to generate an age key and configure your cluster to work with Weave GitOps Enterprise secrets management. + +1- Generate an age key with age-keygen + +```bash +age-keygen -o age.agekey + +Public key: +``` + +2- Export the generated private key to a kubernetes secret `sops-age-private-key` which will be used by flux's kustomize-controller to decrypt the secrets using sops. + +```bash +cat age.agekey | +kubectl create secret generic sops-age-private-key \ +--namespace=flux-system \ +--from-file=age.agekey=/dev/stdin +``` + +4- Export the generated public key to a kubernetes secret `sops-age-public-key` which will be used by Weave GitOps Enterprise to encrypt the secrets created from the UI. + +```bash +echo "" | +kubectl create secret generic sops-age-public-key \ +--namespace=flux-system \ +--from-file=age.agekey=/dev/stdin +``` + +:::tip +It's recommended to remove the secret from your machine + +```bash +rm -f age.ageKey +``` + +::: + +5- Create a kustomization for reconciling the secrets on the cluster and set the `--decryption-secret` flag to the name of the private key created in step 2. + +```bash +flux create kustomization age-secrets \ +--source=secrets \ # the git source to reconcile the secrets from +--path=./secrets/age \ +--prune=true \ +--interval=10m \ +--decryption-provider=sops \ +--decryption-secret=sops-age-private-key +``` + +6- Annotate the kustomization object created in the previous step with the name and namespace of the public key created in step 4. + +```bash +kubectl annotate kustomization age-secrets \ +sops-public-key/name=sops-age-public-key \ +sops-public-key/namespace=flux-system \ +-n flux-system +``` + +
Expand to see the expected kustomization object + +```yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: age-secrets + namespace: flux-system + annotations: + sops-public-key/name: sops-age-public-key + sops-public-key/namespace: flux-system +spec: + interval: 10m + sourceRef: + kind: GitRepository + name: secrets + path: ./secrets/age + decryption: + provider: sops + secretRef: + name: sops-age-private-key + prune: true + validation: server +``` + +
+ +:::note + This is an essential step in order to allow other operators and developers to utilize WeaveGitOps UI to encrypt SOPS secrets using the public key secret in the cluster. +::: + +:::tip + In case of using OpenPGP and age in the same cluster, you need to make the kustomizations point to different directories. This is because flux's kustomize-controller expects that all the secrets in the kustomization's path are encrypted with the same key. +::: + +## Bootstrapping SOPS to leaf clusters + +Bootstrapping SOPS to leaf clusters in WGE can be done by utilizing `ClusterBootstrapConfig` job to bootstrap Flux and SOPS. +The job is a container which generates SOPS secrets key pair, creates a kubernetes secret with the private key, creates a kubernetes secret with the public key (to be used in self-serve flow) and the proper rbac for it. +As well as an option to push the public key to the git repository via a PR (to be distributed). + +### Prerequisites + +#### ClusterBootstrapConfig job + +The following example is using GPG encryption to install SOPS and generate keys when bootstrapping leaf clusters. Create the following `ClusterBootstrapConfig` CR and push it to your fleet repo. + +
Expand to view + + + {SopsBootstrapJob} + + +
+ +#### Cluster template updates + +In order to bootstrap SOPS to leaf clusters, we need some modifications to the cluster template to allow creating a [Kustomization](https://fluxcd.io/flux/guides/mozilla-sops/#configure-in-cluster-secrets-decryption) +for reconciling the secrets on the cluster using SOPS and to run the `ClusterBootstrapConfig` job during cluster creation. + +The template metadata should have annotation, it will be used by WGE to create the Kustomization with the cluster files. + +```yaml +templates.weave.works/sops-enabled: "true" +``` + +The template should have the following parameters that are needed for the Kustomization + +
Expand to view + + + {TemplateParams} + + +
+ +The template should have the following annotations under `GitOpsCluster` to be used in the bootstrap job + +
Expand to view + + + {TemplateAnnotations} + + +
+ +### Installation Steps + +To bootstrap SOPS on a leaf cluster, create a new cluster using the SOPS template from the `Create Cluster` page and fill in the following SOPS-related values in the form: + +- `SOPS_KUSTOMIZATION_NAME`: This Kustomization will be used to decrypt SOPS secrets from this path `clusters/default/leaf-cluster/sops/` after reconciling on the cluster. example (`my-secrets`) +- `SOPS_SECRET_REF`: The private key secret name that will be generated by SOPS in the bootstrap job. example (`sops-gpg`) +- `SOPS_SECRET_REF_NAMESPACE`: The private key secret namespace this secret will be generated by SOPS in the bootstrap job. example (`flux-system`) +- `SOPS_KEY_NAME`: SOPS key name. This will be used to generate SOPS keys. example (`test.yourdomain.com`) +- `SOPS_KEY_COMMENT`: SOPS key comment. This will be used to generate SOPS keys. example (`sops secret comment`) +- `SOPS_PUSH_TO_GIT`: Option to push the public key to the git repository. expected values (`true`, `false`) + +![Bootstrap SOPS](./imgs/sops.png) + +### What to expect + +- A leaf cluster created with Flux & SOPS bootstrapped +- A secret created on leaf cluster `sops-gpg` to decrypt secrets +- A secret created on leaf cluster `sops-gpg-pub` to encrypt secrets +- A Kustomization with `decryption` defined in it to `SOPS` location in the cluster repo location +- Added Role for the public key to be accessed through management cluster +- A PR is created to the cluster repo with the public key and SOPS creation rules (optional) +- Visit the Secrets Page and start managing your secrets via the [UI](./manage-secrets-ui.mdx) + +## Security Recommendations + +Access to sops decryption secrets should be restricted and allowed only to be read by flux's kustomize controller. This can be done using Kubernetes RBAC. + +Here's an example of how you can use RBAC to restrict access to sops decryption secrets: + +1. Create a new Kubernetes role that grants read access to sops decryption secrets + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: sops-secrets-role +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["sops-gpg-private-key", "sops-age-private-key"] + verbs: ["get", "watch", "list"] +``` + +2. Bind the role to the service account of the flux's kustomize-controller + +
Expand to view the RoleBinding + + ```yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: sops-secrets-rolebinding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: sops-secrets-role + subjects: + - kind: ServiceAccount + name: kustomize-controller + ``` + +
+ +:::caution +You would need to ensure that no other rolebindings or clusterrolebndings would allow reading the the decryption secret at any time. This could be achieved by leveraging policy capabilities to detect existing and prevent future creation of roles that would grant read secrets permissions. +::: diff --git a/website/versioned_docs/version-0.24.0/secrets/spec/index.mdx b/website/versioned_docs/version-0.24.0/secrets/spec/index.mdx new file mode 100644 index 0000000000..6759cf2999 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/spec/index.mdx @@ -0,0 +1,8 @@ +--- +title: Secret versions +hide_title: true +--- + +## Versions + +- [v1alpha1](./v1alpha1/secretSync) diff --git a/website/versioned_docs/version-0.24.0/secrets/spec/v1alpha1/secretSync.mdx b/website/versioned_docs/version-0.24.0/secrets/spec/v1alpha1/secretSync.mdx new file mode 100644 index 0000000000..0fa59cbf96 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/secrets/spec/v1alpha1/secretSync.mdx @@ -0,0 +1,61 @@ +--- +title: SecretSync +hide_title: true +--- +import TierLabel from "../../../_components/TierLabel"; + +# SecretSync + +It provides semantics to sync [Kuberentes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) from management cluster to leaf clusters. + +```yaml +apiVersion: capi.weave.works/v1alpha1 +kind: SecretSync +metadata: + name: my-dev-secret-syncer + namespace: default +spec: + clusterSelector: + matchLabels: + environment: dev + secretRef: + name: my-dev-secret + targetNamespace: my-namespace +``` + +## Specification + +The documentation for the api version `capi.weave.works/v1alpha1` + +```go +type SecretSync struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SecretSyncSpec `json:"spec,omitempty"` + Status SecretSyncStatus `json:"status,omitempty"` +} + +// SecretSyncSpec +type SecretSyncSpec struct { + // Label selector for Clusters. The Clusters that are + // selected by this will be the ones affected by this SecretSync. + // It must match the Cluster labels. This field is immutable. + // Label selector cannot be empty. + ClusterSelector metav1.LabelSelector `json:"clusterSelector"` + // SecretRef specifies the Secret to be bootstrapped to the matched clusters + // Secret must be in the same namespace of the SecretSync object + SecretRef v1.LocalObjectReference `json:"secretRef"` + // TargetNamespace specifies the namespace which the secret should be bootstrapped in + // The default value is the namespace of the referenced secret + //+optional + TargetNamespace string `json:"targetNamespace,omitempty"` +} + +// SecretSyncStatus secretsync object status +type SecretSyncStatus struct { + // SecretVersions a map contains the ResourceVersion of the secret of each cluster + // Cluster name is the key and secret's ResourceVersion is the value + SecretVersions map[string]string `json:"versions"` +} + +``` diff --git a/website/versioned_docs/version-0.24.0/terraform/aws-eks.mdx b/website/versioned_docs/version-0.24.0/terraform/aws-eks.mdx new file mode 100644 index 0000000000..aabcde656b --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/aws-eks.mdx @@ -0,0 +1,51 @@ +--- +title: Configure IRSA for AWS EKS +hide_title: true +--- + +# Configure IRSA for AWS Elastic Kubernetes Service (EKS) + +To use AWS Elastic Kubernetes Service (EKS) with TF-controller, you can leverage IAM Roles for Service Accounts (IRSA) +as a way to provide credentials to the Terraform runners (`tf-runner` pods). 
+IRSA allows you to create IAM roles that can be assumed by the identity provider for your Kubernetes cluster, +which can then be used by the pods running in the cluster to access AWS resources. +This can be especially useful for automating infrastructure management tasks using TF-controller. + +To set up IRSA for use with TF-controller, you will need to follow a few steps to associate an OpenID Connect (OIDC) provider with your EKS cluster, +create a trust policy for the IAM role, and annotate the ServiceAccount for the `tf-runner` with the Role ARN. +In this document, we will walk you through these steps in detail so that you can use IRSA with TF-controller in your EKS cluster. + +To use AWS Elastic Kubernetes Service (EKS) with TF-controller, you will need to follow these steps: + +1. Use eksctl to associate an OpenID Connect (OIDC) provider with your EKS cluster. This can be done by running the following command: + + ```bash + eksctl utils associate-iam-oidc-provider --cluster CLUSTER_NAME --approve + ```` + +2. Replace `CLUSTER_NAME` with the name of your EKS cluster. This command will create an IAM OIDC provider and associate it with your EKS cluster. + +3. Follow the instructions in [the AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html) +to add a trust policy to the IAM role that grants the necessary permissions for Terraform. +Make sure to use the `namespace:serviceaccountname` of `flux-system:tf-runner`. This will give you a Role ARN that you will need in the next step. + +4. Annotate the ServiceAccount for the `tf-runner` with the obtained Role ARN in your cluster. You can do this by running the following command: + + ```bash + kubectl annotate -n flux-system serviceaccount tf-runner eks.amazonaws.com/role-arn=ROLE_ARN + ``` + +5. Replace `ROLE_ARN` with the Role ARN obtained in the previous step. + + If you are deploying TF-controller using Helm, you can pass the Role ARN as an annotation to the `tf-runner` ServiceAccount in your Helm values file. + This can be done by adding the following block to your values file: + + ```yaml {5} + values: + runner: + serviceAccount: + annotations: + eks.amazonaws.com/role-arn: ROLE_ARN + ``` + +By following these steps, you will be able to use the Terraform controller with your EKS cluster and provide the necessary AWS credentials for performing plans and applies. diff --git a/website/versioned_docs/version-0.24.0/terraform/backup-and-restore.mdx b/website/versioned_docs/version-0.24.0/terraform/backup-and-restore.mdx new file mode 100644 index 0000000000..ab5d92d1bd --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/backup-and-restore.mdx @@ -0,0 +1,48 @@ +--- +title: Backup and Restore State +hide_title: true +--- + +# Backup and restore Terraform state + +## Backup the tfstate + +Assume that we have the `my-stack` Terraform object with its `.spec.workspace` set to "default". 
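+
+For reference, such a `Terraform` object might look like the minimal sketch below. This is only an illustration of the assumption above; the source name, path, and namespace are made-up values, not taken from this guide.
+
+```yaml
+apiVersion: infra.contrib.fluxcd.io/v1alpha1
+kind: Terraform
+metadata:
+  name: my-stack
+  namespace: flux-system
+spec:
+  interval: 1m
+  approvePlan: auto
+  # "default" is also the implicit value when .spec.workspace is omitted
+  workspace: default
+  path: ./
+  sourceRef:
+    kind: GitRepository
+    name: my-stack        # illustrative source name
+    namespace: flux-system
+```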
+ +```bash +kubectl get terraform + +NAME READY STATUS AGE +my-stack Unknown Initializing 28s +``` + +We can backup its tfstate out of the cluster, like this: + +```bash +WORKSPACE=default +NAME=my-stack + +kubectl get secret tfstate-${WORKSPACE}-${NAME} \ + -ojsonpath='{.data.tfstate}' \ + | base64 -d | gzip -d > terraform.tfstate +``` + +## Restore the tfstate + +To restore the tfstate file or import an existing tfstate file to the cluster, we can use the following operation: + +```bash +gzip terraform.tfstate + +WORKSPACE=default +NAME=my-stack + +kubectl create secret \ + generic tfstate-${WORKSPACE}-${NAME} \ + --from-file=tfstate=terraform.tfstate.gz \ + --dry-run=client -o=yaml \ + | yq e '.metadata.annotations["encoding"]="gzip"' - \ + > tfstate-${WORKSPACE}-${NAME}.yaml + +kubectl apply -f tfstate-${WORKSPACE}-${NAME}.yaml +``` diff --git a/website/versioned_docs/version-0.24.0/terraform/environment-variables.mdx b/website/versioned_docs/version-0.24.0/terraform/environment-variables.mdx new file mode 100644 index 0000000000..1fc9433afe --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/environment-variables.mdx @@ -0,0 +1,19 @@ +--- +title: Logging Env Variables +hide_title: true +--- + +# Logging Env Variables + +A Terraform Runner uses two environment variables, `DISABLE_TF_LOGS` and `ENABLE_SENSITIVE_TF_LOGS`, to control the logging behavior of the Terraform execution. + +To use these environment variables, they need to be set on each Terraform Runner pod where the Terraform code is being executed. +This can typically be done by adding them to the pod's environment variables in the Terraform Runner deployment configuration. + +- The `DISABLE_TF_LOGS` variable, when set to "1", will disable all Terraform output logs to stdout and stderr. +- The `ENABLE_SENSITIVE_TF_LOGS` variable, when set to "1", will enable logging of sensitive Terraform data, +such as secret variables, to the local log. However, it is important to note that for the `ENABLE_SENSITIVE_TF_LOGS` to take effect, +the `DISABLE_TF_LOGS` variable must also be set to "1". + +For more information on configuring the Terraform Runner and its environment variables, +please consult the documentation on [customizing runners](../using-terraform-cr/customize-runner) within the Weave TF-controller. diff --git a/website/versioned_docs/version-0.24.0/terraform/get-started.mdx b/website/versioned_docs/version-0.24.0/terraform/get-started.mdx new file mode 100644 index 0000000000..e90671ee75 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/get-started.mdx @@ -0,0 +1,167 @@ +--- +title: Get Started +hide_title: true +--- + +# Get Started with the Terraform Controller + +## Preflight Checks + +To set up the TF-controller, you will need to follow the steps in the preflight checks. +Here is a summary of what you will need to do: + + 1. Install Flux **v0.32.0** or later on your cluster. This includes installing the Flux CLI on your local machine and installing the Flux controllers on the cluster. + 2. Configure the network firewall or security groups on your cluster to allow incoming connections to **port 30000** on **each Runner's Pod in each namespace**. + This will allow the Controller to communicate with the Runner's Pod via gRPC. + 3. Configure the network firewall or security groups on your cluster to allow the Controller to download tar.gz BLOBs **from the Source controller** via **port 80** and + to post events to **the Notification controller** via **port 80**. 
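+
+As an optional sanity check for the Flux version requirement, you can verify your installation with the Flux CLI before continuing:
+
+```bash
+# runs the Flux pre-flight checks and reports the installed controller versions
+flux check
+```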
+ +It is important to note that the exact steps for setting up the TF-controller will depend on the specific environment +and infrastructure that you are using. You may need to refer to the documentation for your specific environment or +infrastructure to get more detailed instructions on how to set this up. + +## GitOps Installation + +To set up TF-Controller, you will need to perform the following actions: + +1. Create a local cluster using a tool such as `kind` or `minikube`. This will allow you to develop and test the Terraform Controller in a local environment before deploying it to a production cluster. + ```bash + kind create cluster --name tf-controller + ``` + +2. Install the Flux CLI on your local machine. This will allow you to interact with the Flux controllers on your cluster. + ```bash + brew install fluxcd/tap/flux + ``` + +3. Prepare a git repository to store the configuration files and manifests for Flux and TF-controller. Assuming you have a GitHub account, and your username is `$GITHUB_USER`, you can create a new repository called `gitops-tf-controller` using the following command: + ```bash + export GITHUB_USER= + export GITHUB_TOKEN= + + gh repo create $GITHUB_USER/gitops-tf-controller + ``` + +4. Bootstrap the cluster with Flux v2 (v0.32.0 or later) using the path for example `./cluster/my-cluster`. This will install Flux on the cluster and create a Flux system at `./cluster/my-cluster/flux-system`. + ```bash + git clone git@github.com:$GITHUB_USER/gitops-tf-controller.git + cd gitops-tf-controller + + flux bootstrap github \ + --owner=$GITHUB_USER \ + --repository=gitops-tf-controller \ + --branch=main \ + --path=./cluster/my-cluster \ + --personal \ + --token-auth + ``` + +5. Create a directory at `./cluster/my-cluster/infra/` and place the file `tf-controller.yaml` in this directory. + Download the TF-controller manifest from the release location (https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml) + and saving it to `./cluster/my-cluster/infra/tf-controller.yaml`. Add the manifest file to the git repository, commit the changes, and push the repository. + ```bash + mkdir -p ./cluster/my-cluster/infra/ + curl -s https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml > ./cluster/my-cluster/infra/tf-controller.yaml + ``` + +6. In the same directory, create `kustomization.yaml` file that contains the following: + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - tf-controller.yaml + ``` + Add the `kustomization.yaml` file to the git repository, commit the changes, and push the repository. + +If you want to use the Terraform Controller with the Notification Controller, +you will also need to modify the manifest to allow the Notification Controller to work with the Terraform Controller. +The exact steps for doing this will depend on the specific requirements of your environment and the configuration of the Notification Controller. +You may need to refer to [the documentation for the Terraform Controller and Notification Controller](https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-notifications-for-third-party-controllers) +for more information on how to set this up. + +## Other Installation Methods + +Before using TF-controller, you have to install Flux by using either `flux install` or `flux bootstrap` command. +Please note that TF-controller now requires **Flux v0.32.0** or later, so please make sure you have the latest version of Flux. 
+After that you can install TF-controller with Flux HelmRelease by: + +```shell +kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml +``` + +For the most recent release candidate of TF-controller, please use [rc.yaml](https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/rc.yaml). + +```shell +kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/rc.yaml +``` + +or manually with Helm by: + +```shell +# Add tf-controller helm repository +helm repo add tf-controller https://weaveworks.github.io/tf-controller/ + +# Install tf-controller +helm upgrade -i tf-controller tf-controller/tf-controller \ + --namespace flux-system +``` + +For details on configurable parameters of the TF-controller chart, +please see [chart readme](https://github.com/weaveworks/tf-controller/tree/main/charts/tf-controller#tf-controller-for-flux). + +Alternatively, you can install TF-controller via `kubectl`: + +```shell +export TF_CON_VER=v0.14.0 +kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.crds.yaml +kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.rbac.yaml +kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.deployment.yaml +``` + +## Quick start + +Here's a simple example of how to GitOps your Terraform resources with TF-controller and Flux. + +### Define source + +First, we need to define a Source controller's source (`GitRepository`, `Bucket`, `OCIRepository`), for example: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: helloworld + namespace: flux-system +spec: + interval: 30s + url: https://github.com/tf-controller/helloworld + ref: + branch: main +``` + +### The GitOps Automation mode + +The GitOps automation mode could be enabled by setting `.spec.approvePlan=auto`. In this mode, Terraform resources will be planned, +and automatically applied for you. + +```yaml +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + interval: 1m + approvePlan: auto + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +For a full list of features and how to use them, please follow the [terraform section](../overview) in our docs. + +## Other Examples + * A Terraform GitOps with Flux to automatically reconcile your [AWS IAM Policies](https://github.com/tf-controller/aws-iam-policies). + * GitOps an existing EKS cluster, by partially import its nodegroup and manage it with TF-controller: [An EKS scaling example](https://github.com/tf-controller/eks-scaling). diff --git a/website/versioned_docs/version-0.24.0/terraform/oci-artifact.mdx b/website/versioned_docs/version-0.24.0/terraform/oci-artifact.mdx new file mode 100644 index 0000000000..48e9978633 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/oci-artifact.mdx @@ -0,0 +1,57 @@ +--- +title: OCI Artifact +hide_title: true +--- + +# Using OCI Artifacts as a Source for Terraform Objects + +To use OCI artifacts as the source for `Terraform` objects, you'll need to have Flux 2 version **v0.32.0** or higher. 
+ +To create an OCI artifact for your Terraform modules, you can use the Flux CLI by running the following commands: +```bash +flux push artifact oci://ghcr.io/tf-controller/helloworld:$(git rev-parse --short HEAD) \ + --path="./modules" \ + --source="$(git config --get remote.origin.url)" \ + --revision="$(git branch --show-current)/$(git rev-parse HEAD)" + +flux tag artifact oci://ghcr.io/tf-controller/helloworld:$(git rev-parse --short HEAD) \ + --tag main +``` + +To use the OCI artifact as the source for your `Terraform` object, +you'll need to define an `OCIRepository` and use it as the `spec.sourceRef` of your `Terraform` object: + +
Expand to view + +```yaml {5,20-22} +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: helloworld-oci +spec: + interval: 1m + url: oci://ghcr.io/tf-controller/helloworld + ref: + tag: main +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld-tf-oci +spec: + path: ./ + approvePlan: auto + interval: 1m + sourceRef: + kind: OCIRepository + name: helloworld-oci + writeOutputsToSecret: + name: helloworld-outputs +``` + +
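+
+Once both objects are in the cluster, a quick way to confirm they are reconciling (a sketch, assuming they were applied to the `flux-system` namespace):
+
+```bash
+# The OCIRepository should report the resolved artifact revision
+kubectl -n flux-system get ocirepository helloworld-oci
+
+# The Terraform object should move from Initializing to a planned/applied state
+kubectl -n flux-system get terraform helloworld-tf-oci
+```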
+ +This configuration will use the OCI artifact at `oci://ghcr.io/tf-controller/helloworld` with the `main` tag as the +source for your `Terraform` object. The object will be reconciled every 1 minute, and will use the "auto-apply" mode to +apply any changes to your resources. The outputs of the Terraform run will be written to a `Secret` called `helloworld-outputs`. diff --git a/website/versioned_docs/version-0.24.0/terraform/overview.mdx b/website/versioned_docs/version-0.24.0/terraform/overview.mdx new file mode 100644 index 0000000000..482a9cae03 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/overview.mdx @@ -0,0 +1,70 @@ +--- +title: Overview +hide_title: true +--- + +# Overview + +TF-controller is a controller for Weave GitOps that follows the patterns established +by [Flux](https://fluxcd.io). It is a reliable tool for managing your infrastructure using the GitOps approach. +With its support for Terraform and integration with Weave GitOps, +you can trust that it will help you effectively GitOps-ify your infrastructure and application +resources in the Kubernetes and Terraform universe at your own pace. + +The following GitOps models are available to suit your specific needs: + + 1. **GitOps Automation Model:** Fully automate the GitOps process for all of your Terraform resources, including the provisioning and enforcement steps. + 2. **Hybrid GitOps Automation Model:** Choose to GitOps-ify certain parts of your existing infrastructure resources, such as a nodegroup or security group in an existing EKS cluster. + 3. **State Enforcement Model:** Use GitOps to enforce an existing `tfstate` without making any other changes. + 4. **Drift Detection Model:** Use GitOps for drift detection, so you can decide what actions to take when a drift occurs. + +To get started with TF-controller, simply follow the provided [getting started](../get-started) guide. + +## Features + + * **Multi-Tenancy**: TF-controller supports multi-tenancy by running Terraform `plan` and `apply` inside Runner Pods. + When specifying `.metadata.namespace` and `.spec.serviceAccountName`, the Runner Pod uses the specified ServiceAccount + and runs inside the specified Namespace. These settings enable the soft multi-tenancy model, which can be used within + the Flux multi-tenancy setup. _This feature is available since v0.9.0._ + * **GitOps Automation for Terraform**: With setting `.spec.approvePlan=auto`, it allows a `Terraform` object + to be reconciled and act as the representation of your Terraform resources. The TF-controller uses the spec of + the `Terraform` object to perform `plan`, `apply` its associated Terraform resources. It then stores + the `TFSTATE` of the applied resources as a `Secret` inside the Kubernetes cluster. After `.spec.interval` passes, + the controller performs drift detection to check if there is a drift occurred between your live system, + and your Terraform resources. If a drift occurs, the plan to fix that drift will be generated and applied automatically. + _This feature is available since v0.3.0._ + * **Drift detection**: This feature is a part of the GitOps automation feature. The controller detects and fixes drift + for your infrastructures, based on the Terraform resources and their `TFSTATE`. _This feature is available since v0.5.0._ + * Drift detection is enabled by default. You can use the field `.spec.disableDriftDetection` to disable this behaviour. 
+ _This feature is available since v0.7.0._ + * The Drift detection only mode, without plan or apply steps, allows you to perform read-only drift detection. + _This feature is available since v0.8.0._ + * **Plan and Manual Approve**: This feature allows you to separate the `plan`, out of the `apply` step, just like + the Terraform workflow you are familiar with. A good thing about this is that it is done in a GitOps way. When a plan + is generated, the controller shows you a message like **'set approvePlan: "plan-main-123" to apply this plan.'**. + You make change to the field `.spec.approvePlan`, commit and push to tell the TF-controller to apply the plan for you. + With this GitOps workflow, you can optionally create and push this change to a new branch for your team member to + review and approve too. _This feature is available since v0.6.0._ + * **YAML-based Terraform**: The `Terraform` object in v0.13.0+ allows you to better configure your + Terraform resources via YAMLs, but without introducing any extra CRDs to your cluster. Together with a new generator + called **Tofu-Jet**, we'll now be able to ship pre-generated primitive Terraform modules for all major cloud providers. + A primitive Terraform module is a module that only contains a single primitive resource, like `aws_iam_role`, or `aws_iam_policy`. + With this concept, we would be able to use Terraform without writing Terraform codes, and make it more GitOps-friendly at the same time. + _This feature is available since v0.13.0._ + * **Enterprise Dashboard Support:** with Weave GitOps Enterprise v0.9.6 and later, you are now able to manage `Terraform` objects the same way you can + with `Kustomization` and `HelmReleases`. + * **First-class Terraform Cloud Support:** `Terraform` objects can now be configured to use Terraform Cloud as the backend + for storing the state with `spec.cloud`. _This feature is available since v0.14.0._ + +## Dependencies + +TF-controller has its own versioning system that is separate from the versioning system used by Weave GitOps. +This means that you can install and use TF-controller independently of Weave GitOps and it will not be affected +by the version of Weave GitOps that you are using. + +Here is the dependency matrix: + +| Version | Terraform | Source Controller | Flux v2 | +|:-----------:|:---------:|:-----------------:|:-------:| +| **v0.14.0** | v1.3.9 | v0.35.1 | v0.40.x | +| v0.13.1 | v1.3.1 | v0.31.0 | v0.38.x | diff --git a/website/versioned_docs/version-0.24.0/terraform/terraform-enterprise.mdx b/website/versioned_docs/version-0.24.0/terraform/terraform-enterprise.mdx new file mode 100644 index 0000000000..7407fdc996 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/terraform-enterprise.mdx @@ -0,0 +1,42 @@ +--- +title: Terraform Enterprise +hide_title: true +--- + +# Terraform Enterprise + +## Terraform Enterprise Integration + +Starting from v0.9.5, Weave GitOps tf-controller officially supports integration to **Terraform Cloud (TFC)** and +**Terraform Enterprise (TFE)**. Here are the steps to set up tf-controller for your TFE instance. + +![](./tfe_integration_01.png) + +### Terraform Login + +First, you need to obtain an API token from your TFE. You can use `terraform login` command to do so. + +```shell +terraform login tfe.dev.example.com +``` + +Then you can find your API token inside `$HOME/.terraform.d/credentials.tfrc.json`. 
+Content of the file will look like this: + +```json +{ + "credentials": { + "tfe.dev.example.com": { + "token": "mXXXXXXXXX.atlasv1.ixXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + } + } +} +``` + +### Prepare an TFRC file +TF-controller accepts an TFRC file in the HCL format. So you have to prepare `terraform.tfrc` file using contents from above. +```hcl +credentials "tfe.dev.example.com" { + token = "mXXXXXXXXX.atlasv1.ixXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" +} +``` \ No newline at end of file diff --git a/website/versioned_docs/version-0.24.0/terraform/tfctl.mdx b/website/versioned_docs/version-0.24.0/terraform/tfctl.mdx new file mode 100644 index 0000000000..cb37045a12 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/tfctl.mdx @@ -0,0 +1,43 @@ +--- +title: Terraform Controller CLI +hide_title: true +--- + +# Terraform Controller CLI + +`tfctl` is a command-line utility to help with tf-controller operations. + +:::note +We are planning on migrating these features into the Weave GitOps CLI. +::: + +## Installation + +You can download the `tfctl` binary via the GitHub releases page: [https://github.com/weaveworks/tf-controller/releases](https://github.com/weaveworks/tf-controller/releases) + +``` +Usage: + tfctl [command] + +Available Commands: + completion Generate the autocompletion script for the specified shell + create Create a Terraform resource + delete Delete a Terraform resource + get Get Terraform resources + help Help about any command + install Install the tf-controller + plan Plan a Terraform configuration + reconcile Trigger a reconcile of the provided resource + resume Resume reconciliation for the provided resource + suspend Suspend reconciliation for the provided resource + uninstall Uninstall the tf-controller + version Prints tf-controller and tfctl version information + +Flags: + -h, --help help for tfctl + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + -n, --namespace string The kubernetes namespace to use for CLI requests. (default "flux-system") + --terraform string The location of the terraform binary. (default "/usr/bin/terraform") + +Use "tfctl [command] --help" for more information about a command. +``` diff --git a/website/versioned_docs/version-0.24.0/terraform/tfe_integration_01.png b/website/versioned_docs/version-0.24.0/terraform/tfe_integration_01.png new file mode 100644 index 0000000000..bf007cacdc Binary files /dev/null and b/website/versioned_docs/version-0.24.0/terraform/tfe_integration_01.png differ diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/custom-backend.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/custom-backend.mdx new file mode 100644 index 0000000000..da3a831f64 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/custom-backend.mdx @@ -0,0 +1,63 @@ +--- +title: Configure Custom Backend +hide_title: true +--- + +# Configure Custom Backend + +By default, TF-controller will use the [Kubernetes backend](https://www.terraform.io/language/settings/backends/kubernetes) +to store the Terraform state file (tfstate) in the cluster. +The tfstate is stored in a `Secret` named "tfstate-${workspace}-${secretSuffix}", +where the default suffix is the name of the `Terraform` resource. + +You can override this default suffix by setting `.spec.backendConfig.secretSuffix` +in the `Terraform` object. 
The default workspace name is "default", +but you can also override the workspace by setting `.spec.workspace` to a different value. + +If you want to use a custom backend, such as GCS or S3, +you can configure it by defining `.spec.backendConfig.customConfiguration` +in the `Terraform` object. + +Here is an example of how to use a custom backend with the `Terraform` object: + +
Expand to view + +```yaml {9-21} +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + backendConfig: + customConfiguration: | + backend "s3" { + bucket = "s3-terraform-state1" + key = "dev/terraform.tfstate" + region = "us-east-1" + endpoint = "http://localhost:4566" + skip_credentials_validation = true + skip_metadata_api_check = true + force_path_style = true + dynamodb_table = "terraformlock" + dynamodb_endpoint = "http://localhost:4566" + encrypt = true + } + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + runnerPodTemplate: + spec: + image: registry.io/tf-runner:xyz +``` + +
+ +In this example, the `Terraform` object is using a custom backend +with a bucket named "s3-terraform-state1" in the "us-east-1" region, +with the key "dev/terraform.tfstate". diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/customize-runner.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/customize-runner.mdx new file mode 100644 index 0000000000..4fcbebe649 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/customize-runner.mdx @@ -0,0 +1,69 @@ +--- +title: Customize Runner Pods +hide_title: true +--- + +# Customize Runner Pod's Metadata + +In some situations, it is needed to add custom labels and annotations to the runner pod used to reconcile Terraform. +For example, for Azure AKS to grant pod active directory permissions using Azure Active Directory (AAD) Pod Identity, +a label like `aadpodidbinding: myIdentity` on the pod is required. + +
Expand to view + +```yaml +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + runnerPodTemplate: + metadata: + labels: + aadpodidbinding: myIdentity + annotations: + company.com/abc: xyz +``` +
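+
+During a reconciliation you can confirm that the metadata was propagated to the runner pod. A quick check, assuming the object above (and therefore its runner) lives in the `flux-system` namespace; note that the runner pod only exists while a run is in progress:
+
+```bash
+kubectl -n flux-system get pods -l aadpodidbinding=myIdentity --show-labels
+```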

+
+## Customize Runner Pod Image
+
+By default, the Terraform controller uses the `RUNNER_POD_IMAGE` environment variable to identify the Runner Pod image to use. You can customize the image globally by updating the value of that environment variable, or you can specify an image per `Terraform` object for its reconciliation (see the note after the example below for the global approach).
+
+
Expand to view + +```yaml +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + runnerPodTemplate: + spec: + image: registry.io/tf-runner:xyz +``` + +
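+
+The global default mentioned above is controlled by the `RUNNER_POD_IMAGE` environment variable on the controller itself. A sketch of changing it, assuming TF-controller is installed as a Deployment named `tf-controller` in the `flux-system` namespace (adjust both names to match your installation):
+
+```bash
+kubectl -n flux-system set env deployment/tf-controller RUNNER_POD_IMAGE=registry.io/tf-runner:xyz
+```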

+
+You can use [`runner.Dockerfile`](https://github.com/weaveworks/tf-controller/blob/main/runner.Dockerfile) as a basis for customizing the runner pod image.
+
+## Customize Runner Pod Specifications
+
+You can also customize various Runner Pod `spec` fields to control and configure how the Runner Pod runs.
+For example, you can configure Runner Pod `spec` affinity and tolerations if you need to run it on a specific set of nodes. Please see [RunnerPodSpec](https://weaveworks.github.io/tf-controller/References/terraform/#infra.contrib.fluxcd.io/v1alpha1.RunnerPodSpec) for a list of the configurable Runner Pod `spec` fields.
diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/depends-on.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/depends-on.mdx
new file mode 100644
index 0000000000..d4e6cd1dae
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/depends-on.mdx
@@ -0,0 +1,105 @@
+---
+title: Dependency Management
+hide_title: true
+---
+
+# Dependency Management
+
+TF-controller supports GitOps dependency management. The GitOps dependency management feature
+is based on the Kustomization controller of Flux.
+
+This means that you can use TF-controller to provision resources that depend on other resources at the GitOps level.
+For example, you can use TF-controller to provision an S3 bucket, and then use TF-controller
+to provision another resource to configure the ACL for that bucket.
+
+## Create a Terraform object
+
+Similar to the same feature in the Kustomization controller, the dependency management feature is enabled
+by setting the `dependsOn` field in the `Terraform` object. The `dependsOn` field is a list of
+`Terraform` objects.
+
+First, create a `Terraform` object to provision the S3 bucket and name it `aws-s3-bucket`.
+The S3 bucket is provisioned by the Terraform module `aws_s3_bucket` in the OCI image `aws-package-v4.33.0`.
+It is configured to use the `auto-apply` mode and to write outputs to the secret `aws-s3-bucket-outputs`.
+
+
Expand to view + +```yaml {20-24} +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: aws-s3-bucket + namespace: flux-system +spec: + path: aws_s3_bucket + values: + bucket: my-tf-controller-test-bucket + tags: + Environment: Dev + Name: My bucket + sourceRef: + kind: OCIRepository + name: aws-package-v4.33.0 + approvePlan: auto + interval: 2m + destroyResourcesOnDeletion: true + writeOutputsToSecret: + name: aws-s3-bucket-outputs + outputs: + - arn + - bucket + runnerPodTemplate: + spec: + envFrom: + - secretRef: + name: aws-credentials +``` + +
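+
+After this object has been applied and reconciled, the outputs Secret it writes should exist before the dependent object described next can proceed. A quick check, using the names from the example above:
+
+```bash
+# The Secret should contain the exported outputs, e.g. the "arn" and "bucket" keys
+kubectl -n flux-system get secret aws-s3-bucket-outputs
+```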
+ +Second, create a `Terraform` object to configure ACL for the S3 bucket, name it `aws-s3-bucket-acl`. +The ACL is provisioned by the Terraform module `aws_s3_bucket_acl`, also from the OCI image `aws-package-v4.33.0`. + +In the `dependsOn` field, specify the `Terraform` object that provisions the S3 bucket. +This means that the ACL will be configured only after the S3 bucket is provisioned, and has its outputs Secret written. +We can read the outputs of the S3 bucket from the Secret `aws-s3-bucket-outputs`, by specifying the `spec.readInputsFromSecrets` field. +The `spec.readInputsFromSecrets` field is a list of Secret objects. +Its `name` field is the name of the Secret, and its `as` field is the name of variable that can be used in the `spec.values` block. + +For example, the `spec.values.bucket` field in the `aws-s3-bucket-acl` Terraform object is set to `${{ .aws_s3_bucket.bucket }}`. + +Please note that we use `${{` and `}}` as the delimiters for the variable name, instead of the Helm default ones, `{{` and `}}`. + +
Expand to view + +```yaml hl_lines="11 18 20-21" +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: aws-s3-bucket-acl + namespace: flux-system +spec: + path: aws_s3_bucket_acl + values: + acl: private + bucket: ${{ .aws_s3_bucket.bucket }} + sourceRef: + kind: OCIRepository + name: aws-package-v4.33.0 + approvePlan: auto + interval: 3m + dependsOn: + - name: aws-s3-bucket + readInputsFromSecrets: + - name: aws-s3-bucket-outputs + as: aws_s3_bucket + runnerPodTemplate: + spec: + envFrom: + - secretRef: + name: aws-credentials +``` + +
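+
+With both objects committed, the ACL object will stay pending until the bucket object is ready and its outputs Secret has been written. A quick way to watch the ordering, using the names above:
+
+```bash
+kubectl -n flux-system get terraform aws-s3-bucket aws-s3-bucket-acl
+```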
diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/detect-drifts-only-without-plan-or-apply.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/detect-drifts-only-without-plan-or-apply.mdx new file mode 100644 index 0000000000..fe034b2b7d --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/detect-drifts-only-without-plan-or-apply.mdx @@ -0,0 +1,28 @@ +--- +title: Detect drifts only without planning or applying +hide_title: true +--- + +# Use TF-controller to detect drifts only without planning or applying + +To detect drifts of your Terraform resources only, you can use the `Terraform` custom resource (CR) object +in TF-controller and set the `spec.approvePlan` field to `disable`. This will tell the controller to skip +the plan and apply stages, and only perform drift detection. + +Here is an example of a `Terraform` CR object with `spec.approvePlan` set to `disable`: + +```yaml {7} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: hello-world + namespace: flux-system +spec: + approvePlan: disable + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/drift-detection-disabled.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/drift-detection-disabled.mdx new file mode 100644 index 0000000000..71769108f1 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/drift-detection-disabled.mdx @@ -0,0 +1,37 @@ +--- +title: Disable drift detection +hide_title: true +--- + +# Use TF-controller with drift detection disabled + +To disable drift detection in a `Terraform` object reconciled by the TF-controller, you can add the following configuration to your `Terraform` object: + +```yaml {8} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + disableDriftDetection: true + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +This configuration will tell the TF-controller to run the specified Terraform configuration +located at `./` in the `flux-system` namespace, with a polling `interval` of 1 minute. +It will also automatically approve any plans that are created, +and it will not perform drift detection. + +Drift detection is a feature that compares the current state of the resources +managed by Terraform with the desired state defined in the configuration files. +If there are any differences, the TF-controller will create a plan to bring +the resources back in line with the configuration. +By setting `spec.disableDriftDetection: true`, you are telling the TF-controller +to skip this check and not create any plans to correct for any detected drift. diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/modules.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/modules.mdx new file mode 100644 index 0000000000..d518e663c2 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/modules.mdx @@ -0,0 +1,71 @@ +--- +title: Primitive Modules +hide_title: true +--- + +# Primitive Modules + +This document describes how to use the Weave TF-controller with a primitive module. +It requires TF-controller v0.13+ to run the example. + +## What is a primitive module? 

+
+It's a Terraform module that contains only a single resource.
+
+ * A Terraform primitive module must contain the "values" variable.
+ * The "values" variable must be an object with fields of optional types.
+ * The module must be placed under a directory, which is named after the resource.
+ * The directory can optionally contain other files, for example the `.terraform.lock.hcl` file.
+ * We call a set of primitive modules bundled into an OCI image a package.
+
+## Hello World Primitive Module
+
+Here is an example of how a primitive module can be defined in YAML.
+Assume that we have a ready-to-use OCI image with a primitive module for the imaginary resource `aws_hello_world`,
+and the image is tagged as `ghcr.io/tf-controller/hello-primitive-modules/v4.32.0:v1`.
+
+We'll use the following Terraform object definition to provision the resource.
+
+1. We need to create a Terraform object with the `spec.sourceRef.kind` field
+set to `OCIRepository` and the `spec.sourceRef.name` field set to the name of the OCIRepository object.
+
+2. We need to set the `spec.path` field to the name of the resource, in this case `aws_hello_world`.
+
+3. We need to set the `spec.values` field to the values of the resource. This is a YAML object that
+will be converted to an HCL variable and passed to the Terraform module.
+
+4. We need to set the `spec.approvePlan` field to `auto` to automatically approve the plan.
+
+
Expand to view + +```yaml {19-26} +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: hello-package-v4.32.0 + namespace: flux-system +spec: + interval: 30s + url: oci://ghcr.io/tf-controller/hello-primitive-modules/v4.32.0 + ref: + tag: v1 +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: hello-world + namespace: flux-system +spec: + path: aws_hello_world + values: + greeting: Hi + subject: my world + sourceRef: + kind: OCIRepository + name: hello-package-v4.32.0 + approvePlan: auto + interval: 1h0m +``` + +
diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/plan-and-manually-apply-terraform-resources.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/plan-and-manually-apply-terraform-resources.mdx new file mode 100644 index 0000000000..7544651f28 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/plan-and-manually-apply-terraform-resources.mdx @@ -0,0 +1,94 @@ +--- +title: Plan and manually apply Terraform resources +hide_title: true +--- + +# Use TF-controller to plan and manually apply Terraform resources + +TF-controller is a tool that allows you to manage infrastructure as code +using Terraform within a Kubernetes cluster. With TF-controller, you can +define your infrastructure in a declarative way and have it automatically managed and updated. + +In this guide, we will walk through the steps of using TF-controller to plan and +manually apply Terraform resources. +This involves creating a `Terraform` object and a `GitRepository` object, +and then using kubectl to view the plan before applying it. + +We will start by creating the `Terraform` object and specifying the necessary fields, +including the `approvePlan` field. +We will then create the `GitRepository` object, +which points to the Git repository containing the Terraform configuration. + +Once these objects are created, we will use kubectl to obtain the `approvePlan` value +and set it in the `Terraform` object. After making our changes and pushing them to the Git repository, +TF-controller will apply the plan and create the real resources. + +## Define the Terraform object + +To use the plan & manual approval workflow with TF-controller, +you will need to start by either setting the `spec.approvePlan` field in the `Terraform` object +to be the blank value, or omitting it entirely. +This will tell TF-controller to use the plan & manual approval workflow, rather than the auto-apply workflow. +If you want to use the auto-apply workflow, you will need to set the `spec.approvePlan` field to "auto". + +To set the `spec.approvePlan` field to be the blank value, +you can include it in the spec field and set it to an empty string. +Alternatively, you can omit the `spec.approvePlan` field entirely and let it default to the blank value. + +In addition to setting the `spec.approvePlan` field, you will also need to specify the `interval`, `path`, +and `sourceRef` fields in the spec field. +The `interval` field determines how often TF-controller will run the Terraform configuration, +the `path` field specifies the location of the configuration files, +and the `sourceRef` field points to the GitRepository object. + +```yaml {7} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: "" # or you can omit this field + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +## View the approval message + +After setting the approvePlan field in the Terraform object and creating any necessary objects, the controller will generate a plan and output a message about how to use the approvePlan field to approve the plan. + +To obtain this message, you can run the following command: + +```bash +kubectl -n flux-system get tf/helloworld +``` + +This command will output the message containing the approvePlan value +that you will need to use to approve the plan. 
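+
+For illustration only (the exact wording varies between controller versions), the pending plan is surfaced in the object's status along these lines, and the `plan-main-...` identifier is the value you need to copy:
+
+```bash
+NAME         READY     STATUS                                                                          AGE
+helloworld   Unknown   Plan generated: set approvePlan: "plan-main-b8e362c206" to apply this plan.    2m
+```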
+Once you have this value, you can edit the Terraform object file, and set the `spec.approvePlan` field +to the value obtained from the message. + +After making your changes and pushing them to the Git repository, +TF-controller will apply the plan and create the real resources. +This process is known as the plan & manual approval workflow, +as it involves generating a plan and requiring manual approval before the changes are applied. + +```yaml {7} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: hello-world + namespace: flux-system +spec: + approvePlan: plan-main-b8e362c206 # first 8 digits of a commit hash is enough + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/provision-resources-and-auto-approve.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/provision-resources-and-auto-approve.mdx new file mode 100644 index 0000000000..38a573b4a9 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/provision-resources-and-auto-approve.mdx @@ -0,0 +1,91 @@ +--- +title: Provision resources and auto approve +hide_title: true +--- + +# Use TF-controller to provision resources and auto approve + +TF-controller is a tool that allows you to manage infrastructure as code using Terraform, +a popular infrastructure as code tool, within a Kubernetes cluster. +With TF-controller, you can define your infrastructure in a declarative way and have +it automatically managed and updated. In this guide, we will walk through the steps of +setting up and using TF-controller to provision resources, as well as setting it up to automatically approve changes. + +## Create a Terraform object + +To get started with using TF-controller, the first step is to prepare the necessary objects. +This includes creating a Terraform object and a Flux source object. + +The Terraform object is a Kubernetes custom resource (CR) that defines +the Terraform module, backend configuration, and GitOps automation mode. +The Terraform module is the configuration used to provision resources +and can be stored in a Git repository or packaged in an OCI image in an OCI registry. + +The backend configuration is optional and sets the backend to be used to store the Terraform state. +If not specified, the Kubernetes backend will be used by default. + +The GitOps automation mode is also optional, with the default being "plan-and-manually-apply". +In this example, we'll use the "auto-apply" mode. + +The Flux source object is a source of configuration files, +such as a Git repository or OCI registry. It tells TF-controller where to find the Terraform module +and any other necessary configuration files. +There are several types of Flux source objects available, including `GitRepository` and `OCIRepository`. +Choose the one that best fits your needs. + +Once you have prepared these objects, you are ready to start using TF-controller to manage your infrastructure. + +## GitOps Automation Mode + +In TF-controller, the GitOps automation mode determines how Terraform runs and manages your infrastructure. +There are several options available for the GitOps automation mode, including "plan-and-manually-apply" and "auto-apply". + +In the "plan-and-manually-apply" mode, +TF-controller will run a Terraform plan and output the proposed changes to a Git repository. +A human must then review and manually apply the changes. 
+This is the default GitOps automation mode if none is specified. + +In the "auto-apply" mode, TF-controller will automatically apply the changes after a Terraform plan is run. +This can be useful for environments where changes can be made automatically, +but it is important to ensure that the proper controls, like policies, are in place to prevent unintended changes +from being applied. + +To specify the GitOps automation mode in a Terraform object, +you can set the `spec.approvePlan` field to the desired value. For example, to use the "auto-apply" mode, y +ou would set it to `spec.approvePlan: auto`. + +It is important to carefully consider which GitOps automation mode is appropriate for your use case to ensure that +your infrastructure is properly managed and controlled. + +## Example + +```yaml {8} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld +spec: + path: ./helloworld + interval: 10m + approvePlan: auto + sourceRef: + kind: GitRepository + name: helloworld +``` + +This code is defining a `Terraform` object in Kubernetes. +The `apiVersion` field specifies the version of the Kubernetes API being used, +and the `kind` field specifies that it is a `Terraform` object. +The `metadata` block contains information about the object, including its `name`. + +The `spec` field contains the specification for the `Terraform` object. +The `path` field specifies the path to the Terraform configuration files, +in this case a directory named "helloworld". +The `interval` field specifies the frequency at which TF-controller should run the Terraform configuration, +in this case every 10 minutes. The `approvePlan` field specifies whether or not +to automatically approve the changes proposed by a Terraform plan. +In this case, it is set to `auto`, meaning that changes will be automatically approved. + +The `sourceRef` field specifies the Flux source object to be used. +In this case, it is a `GitRepository` object with the name "helloworld". +This indicates that the Terraform configuration is stored in a Git repository object with the name `helloworld`. diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/provision-resources-and-write-output-data.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/provision-resources-and-write-output-data.mdx new file mode 100644 index 0000000000..873a8d9acb --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/provision-resources-and-write-output-data.mdx @@ -0,0 +1,118 @@ +--- +title: Provision resources and write output data +hide_title: true +--- + +# Output Data + +Output data is data produced by Terraform as a result of running a configuration. +Output data can include values such as resource IDs, IP addresses, and other information about the resources that have been created. + +With TF-controller, you can use the `.spec.writeOutputsToSecret` field to write the outputs created by Terraform to a secret. +A secret is a Kubernetes resource that stores sensitive data, such as passwords, API keys, and other confidential information. + +## Write all outputs + +To write all outputs created by Terraform to a secret using TF-controller, you will need to create a Terraform object and specify the .spec.writeOutputsToSecret.name field. 
+ +Here is an example of a Terraform object that writes all outputs to a secret named "helloworld-output": + +```yaml {14-15} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + writeOutputsToSecret: + name: helloworld-output +``` + +In this example, the `.spec.writeOutputsToSecret.name` field is set to "helloworld-output", +which specifies the name of the secret that the outputs will be written to. +By default, the controller will write all outputs to the secret. + +To use this `Terraform` object, you will also need to create a `GitRepository` object to specify +the location of the Terraform configuration files. In this example, the `GitRepository` object has the name "helloworld" +and is located in the "flux-system" namespace. + +Once the `Terraform` and `GitRepository` objects are created, the controller will automatically write all outputs +created by Terraform to the specified secret. This can be useful in situations where you want to store the outputs +in a secure location or use them in other parts of your infrastructure. + +## Selectively Writing Outputs + +In addition to writing all outputs created by Terraform to a secret, +you can also choose to write only a subset of outputs by specifying the output names you want to write in the `.spec.writeOutputsToSecret.outputs` array. + +To do this, you will need to create a `Terraform` object and specify the `.spec.writeOutputsToSecret.name` and `.spec.writeOutputsToSecret.outputs` fields. + +Here is an example of a `Terraform` object that writes only the outputs with the names "hello_world" and "my_sensitive_data" to a secret named "helloworld-output": + +```yaml {16-18} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + writeOutputsToSecret: + name: helloworld-output + outputs: + - hello_world + - my_sensitive_data +``` + +In this example, the `.spec.writeOutputsToSecret.name` field is set to "helloworld-output", +which specifies the name of the secret that the outputs will be written to. +The `.spec.writeOutputsToSecret.outputs` field is an array containing the names of the outputs to be written to the secret. + +## Renaming outputs + +Sometimes you may want to rename an output in order to use it with other components in your GitOps pipeline. +For example, you may have a key in a secret manager that must be named a certain way in order to be used by other controllers. + +TF-controller provides support for renaming outputs by using the `old_name:new_name` format in the `.spec.writeOutputsToSecret.outputs` field. + +To rename an output with TF-controller, you will need to create a Terraform object and specify +the `.spec.writeOutputsToSecret.name` and `.spec.writeOutputsToSecret.outputs` fields. 
+ +Here is an example of a Terraform object that renames the "age_key" output to "age.agekey" and writes it to a secret named "helloworld-output": + +```yaml {16-17} +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + writeOutputsToSecret: + name: helloworld-output + outputs: + - age_key:age.agekey +``` + +In this example, the name field is still set to "helloworld-output", +but the outputs field now contains the `old_name:new_name` mapping, +which renames the "age_key" output to "age.agekey" as it is written to the secret. diff --git a/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/set-variables-for-trraform-resources.mdx b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/set-variables-for-trraform-resources.mdx new file mode 100644 index 0000000000..0ee2c2e9b9 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/terraform/using-terraform-cr/set-variables-for-trraform-resources.mdx @@ -0,0 +1,128 @@ +--- +title: Set variables for Terraform resources +hide_title: true +--- + +# Use TF-controller to set variables for Terraform resources + +:::warning Breaking Change +This is a breaking change of the `v1alpha1` API. +::: + +Users who are upgrading from TF-controller <= 0.7.0 require updating `varsFrom`, +from a single object: + +```yaml {2} + varsFrom: + kind: ConfigMap + name: cluster-config +``` + +to be an array of object, like this: + +```yaml {2} + varsFrom: + - kind: ConfigMap + name: cluster-config +``` + +## Set variables + +TF-controller allows you to pass variables to Terraform using the `vars` and `varsFrom` fields in a `Terraform` object. + +Inline variables can be set using the `vars` field, which supports HCL string, number, bool, object, and list types. +The `varsFrom` field accepts a list of `ConfigMaps` or `Secrets` and allows you to select specific keys using +the `varsKeys` property, or you can omit this field to select all keys from the input source. + +If the same variable key is passed multiple times, the controller will use the latter most instance of the key +passed to `varsFrom`. + +Here is an example of a `Terraform` object that sets inline variables +using the `vars` field and retrieves variables from a `ConfigMap` and `Secret` using the `varsFrom` field: + +
Expand to view + +```yaml {15-20,22-28} +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + vars: + - name: region + value: us-east-1 + - name: env + value: dev + - name: instanceType + value: t3-small + varsFrom: + - kind: ConfigMap + name: cluster-config + varsKeys: + - nodeCount + - instanceType + - kind: Secret + name: cluster-creds +``` + +
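+
+For the example above to resolve, the referenced input sources must exist alongside the `Terraform` object (here, in `flux-system`). A sketch of what they might look like; the object names and the ConfigMap keys come from the example, while the Secret key and all values are placeholders:
+
+```yaml
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cluster-config
+  namespace: flux-system
+data:
+  nodeCount: "3"
+  instanceType: t3-medium
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cluster-creds
+  namespace: flux-system
+stringData:
+  clusterToken: "REPLACE_ME"
+```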
+ +## Variables as HCL + +The `vars` field in a `Terraform` object allows you to set variables for your Terraform configuration. +This field supports HCL string, number, bool, object, and list types. + +In the example provided, the vars field sets the value of the `cluster_spec` variable to an object +with four fields: "region", "env", "node_count", and "public". The "region" and "env" fields are strings, +the "node_count" field is a number, and the "public" field is a boolean. + +This allows you to set variables in your Terraform configuration in a flexible and dynamic way, +using data that can be passed in through the `Terraform` object. + +
Expand to view + +```hcl {3-6} +variable "cluster_spec" { + type = object({ + region = string + env = string + node_count = number + public = bool + }) +} +``` + +```yaml {17-20} +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + vars: + - name: cluster_spec + value: + region: us-east-1 + env: dev + node_count: 10 + public: false +``` + +
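+
+For reference, the `cluster_spec` entry under `vars` above corresponds to passing roughly the following HCL value to the module (a sketch of the mapping, not literal controller output):
+
+```hcl
+cluster_spec = {
+  region     = "us-east-1"
+  env        = "dev"
+  node_count = 10
+  public     = false
+}
+```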
diff --git a/website/versioned_docs/version-0.24.0/workspaces/imgs/list-workspaces-view.png b/website/versioned_docs/version-0.24.0/workspaces/imgs/list-workspaces-view.png new file mode 100644 index 0000000000..47bea906e4 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/workspaces/imgs/list-workspaces-view.png differ diff --git a/website/versioned_docs/version-0.24.0/workspaces/imgs/workspace-details-view.png b/website/versioned_docs/version-0.24.0/workspaces/imgs/workspace-details-view.png new file mode 100644 index 0000000000..6930a05019 Binary files /dev/null and b/website/versioned_docs/version-0.24.0/workspaces/imgs/workspace-details-view.png differ diff --git a/website/versioned_docs/version-0.24.0/workspaces/intro.mdx b/website/versioned_docs/version-0.24.0/workspaces/intro.mdx new file mode 100644 index 0000000000..e5c6d013f9 --- /dev/null +++ b/website/versioned_docs/version-0.24.0/workspaces/intro.mdx @@ -0,0 +1,16 @@ +--- +title: Introduction +hide_title: true +--- + +import TierLabel from "./../_components/TierLabel"; + +

+ {frontMatter.title} +

+
+## Workspaces
+
+Organizations working with Kubernetes have a tremendous need to manage tenancy for numerous software delivery teams. Weave GitOps Workspaces offers tenancy management for Kubernetes clusters at scale. It builds on top of Flux's powerful approach to managing tenancy and adds policies that help define finer-grained rules for tenants.
+
+With Workspaces, platform operators define all workspaces in one or more YAML files. A single CLI command then generates all the YAML configuration needed to set up each tenant, along with the list of policies that apply to each workspace and the list of repositories to which each workspace has access.
diff --git a/website/versioned_docs/version-0.24.0/workspaces/multi-tenancy.mdx b/website/versioned_docs/version-0.24.0/workspaces/multi-tenancy.mdx
new file mode 100644
index 0000000000..48f6043878
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/workspaces/multi-tenancy.mdx
@@ -0,0 +1,313 @@
+---
+title: Multi Tenancy
+hide_title: true
+---
+
+import TierLabel from "./../_components/TierLabel";
+
+

+ {frontMatter.title} +

+
+## Multi Tenancy
+
+Multi tenancy gives users the ability to define boundaries between multiple engineering teams working on a single cluster. Through a simple interface, it adds permissions to the necessary Kubernetes resources, making it easy to manage multiple tenants.
+
+WGE multi tenancy expands on the multi tenancy feature provided by `flux`. In addition to creating the necessary Kubernetes tenancy resources that `flux` adds, multi tenancy in WGE also adds the following:
+- Tenancy is defined in a single yaml file that serves as a source of truth for the organization
+- WGE policy features are used to enforce permissions that are not natively available in Kubernetes
+
+## Prerequisites
+
+- [`gitops` command line tool](/references/cli-reference/gitops.md)
+- [Tenancy File](#tenancy-file) (optional)
+- [Policies](/policy/intro.mdx) (optional)
+
+## How it works
+
+The `gitops` command line tool is responsible for creating the multi tenancy resources. The tool is distributed as part of the WGE offering. It reads the definitions from a yaml file and can either apply the necessary changes directly to the cluster or output them to stdout so they can be saved into a file and pushed to a repo to be reconciled by `flux`.
+
+To make use of the policy features, the [policy agent](/policy/intro.mdx) needs to be installed in the necessary cluster(s).
+
+### Tenancy file
+
+Below is an example of a tenancy file:
+
+
Expand to view + +```yaml title="tenancy.yaml" +--- +tenants: + - name: first-tenant + namespaces: + - first-ns + - name: second-tenant + namespaces: + - second-test-ns + - second-dev-ns + allowedRepositories: + - kind: GitRepository + url: https://github.com/testorg/testrepo + - kind: GitRepository + url: https://github.com/testorg/testinfo + - kind: Bucket + url: minio.example.com + - kind: HelmRepository + url: https://testorg.github.io/testrepo + allowedClusters: + - kubeConfig: cluster-1-kubeconfig + - kubeConfig: cluster-2-kubeconfig + teamRBAC: + groupNames: + - foo-group + - bar-group + rules: + - apiGroups: + - '' + resources: + - 'namespaces' + - 'pods' + verbs: + - 'list' + - 'get' + deploymentRBAC: + bindRoles: + - name: foo-role + kind: Role + rules: + - apiGroups: + - '' + resources: + - 'namespaces' + - 'pods' + verbs: + - 'list' + - 'get' +serviceAccount: + name: "reconcilerServiceAccount" +``` + +

+
+The file above defines two tenants: `first-tenant` and `second-tenant` as follows:
+
+- `namespaces`: describes which namespaces should be part of the tenant, meaning that users who are part of the tenant will have access to those namespaces.
+- `allowedRepositories`: limits the `flux` repository sources that can be used in the tenant's namespaces. This is done through policies and thus requires `policy-agent` to be deployed on the cluster, which will stop these sources from being deployed if they aren't allowed as part of the tenant. It consists of:
+  - `kind`: the `flux` source kind. Can be `GitRepository`, `Bucket`, or `HelmRepository`.
+  - `url`: the URL for that source.
+- `allowedClusters`: limits which secrets containing cluster configuration can be used. It stops WGE `GitopsCluster` and flux `Kustomization` objects from being deployed if they point to a secret not in the list, essentially giving control over which clusters can be added to a multi-cluster setup. Requires `policy-agent`.
+  - `kubeConfig`: name of the secret that can be used for this tenant.
+- `teamRBAC`: generates Roles and RoleBindings for a list of `groupNames`. This allows you to easily give an OIDC group access to a tenant's resources. When the Weave Gitops Enterprise UI is configured with your OIDC provider, tenants can log in and view the status of the resources they have been granted access to.
+- `deploymentRBAC`: generates Roles and RoleBindings for a service account. It can additionally bind to existing Roles/ClusterRoles. It uses the global service account if one is specified in the tenants file; otherwise it uses the created service account, which takes the tenant name. If `deploymentRBAC` is not specified, a RoleBinding is created that binds to the `cluster-admin` ClusterRole.
+
+Global options:
+
+- `serviceAccount`: overrides the name of the generated `ServiceAccount` for all tenants. This allows you to easily use the flux controllers' [`--default-service-account`](https://github.com/fluxcd/flux2-multi-tenancy#enforce-tenant-isolation) feature. Tenants do not need to make sure they correctly specify the `serviceAccount` when using `Kustomization` or `HelmRelease` resources. The kustomization-controller and helm-controller will instead look for the `default-service-account` in the namespace being reconciled to and use that. Just configure `serviceAccount.name` and `--default-service-account` to the same value.
+
+### Gitops create tenants command
+
+The command creates the necessary resources to apply multi tenancy on the user's cluster. To use the command to apply the resources directly, the user needs to have the necessary configuration to connect to the desired cluster.
+The command considers the tenancy file as a source of truth and will change the cluster state to match what is currently described in the file.
+
+For more control over a specific tenant, a tenancy file should be used. The command also allows creating the base resources that define a tenancy directly through arguments:
+
+```bash
+gitops create tenants --name test-tenant --namespace test-ns1 --namespace test-ns2
+```
+
+
Expand to view command output + +```bash +namespace/test-ns1 created +test-ns1/serviceaccount/test-tenant created +test-ns1/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created +namespace/test-ns2 created +test-ns2/serviceaccount/test-tenant created +test-ns2/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created +policy.pac.weave.works/weave.policies.tenancy.test-tenant-allowed-application-deploy created +``` + +
+ +The above will create the namespaces and permissions through a `ServiceAccount` with the same name as the tenant, `test-tenant` in the case of the above example, in each required namespace. +The same can be done through a file as follows: + +```yaml +tenants: + - name: test-tenant + namespaces: + - test-ns1 + - test-ns2 +``` + +```bash +gitops create tenants --from-file tenants.yaml +``` + +
Expand to view command output + +```bash +namespace/test-ns1 created +test-ns1/serviceaccount/test-tenant created +test-ns1/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created +namespace/test-ns2 created +test-ns2/serviceaccount/test-tenant created +test-ns2/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created +policy.pac.weave.works/weave.policies.tenancy.test-tenant-allowed-application-deploy created +``` + +
+ +To check the resources that would be deployed first use the `export` flag: + +```bash +gitops create tenants --from-file tenants.yaml --export +``` + +
Expand to view command output + +```bash +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: test-ns1 +spec: {} +status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: test-tenant + namespace: test-ns1 +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: test-tenant-service-account-cluster-admin + namespace: test-ns1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: test-tenant + namespace: test-ns1 +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: test-ns2 +spec: {} +status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: test-tenant + namespace: test-ns2 +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: test-tenant-service-account-cluster-admin + namespace: test-ns2 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: test-tenant + namespace: test-ns2 +--- +apiVersion: pac.weave.works/v2beta2 +kind: Policy +metadata: + creationTimestamp: null + labels: + toolkit.fluxcd.io/tenant: test-tenant + name: weave.policies.tenancy.test-tenant-allowed-application-deploy +spec: + category: weave.categories.tenancy + code: | + package weave.tenancy.allowed_application_deploy + + controller_input := input.review.object + violation[result] { + namespaces := input.parameters.namespaces + targetNamespace := controller_input.spec.targetNamespace + not contains_array(targetNamespace, namespaces) + result = { + "issue detected": true, + "msg": sprintf("using target namespace %v is not allowed", [targetNamespace]), + } + } + violation[result] { + serviceAccountName := controller_input.spec.serviceAccountName + serviceAccountName != input.parameters.service_account_name + result = { + "issue detected": true, + "msg": sprintf("using service account name %v is not allowed", [serviceAccountName]), + } + } + contains_array(item, items) { + items[_] = item + } + description: Determines which helm release and kustomization can be used in a tenant + how_to_solve: "" + id: weave.policies.tenancy.test-tenant-allowed-application-deploy + name: test-tenant allowed application deploy + parameters: + - name: namespaces + required: false + type: array + value: + - test-ns1 + - test-ns2 + - name: service_account_name + required: false + type: string + value: test-tenant + provider: kubernetes + severity: high + standards: [] + tags: + - tenancy + targets: + kinds: + - HelmRelease + - Kustomization + labels: [] + namespaces: + - test-ns1 + - test-ns2 +status: {} +--- +``` + +
+
+Applying the resources through the command line is not usually recommended. For WGE, the recommended approach is to commit the result of the `create tenants` command to source control and let `flux` handle the deployment. To achieve that, you can save the output of the `--export` flag to a file:
+
+```bash
+gitops create tenants --from-file tenants.yaml --export > clusters/management/tenants.yaml
+```
diff --git a/website/versioned_docs/version-0.24.0/workspaces/view-workspaces.mdx b/website/versioned_docs/version-0.24.0/workspaces/view-workspaces.mdx
new file mode 100644
index 0000000000..43e0bd361f
--- /dev/null
+++ b/website/versioned_docs/version-0.24.0/workspaces/view-workspaces.mdx
@@ -0,0 +1,26 @@
+---
+title: Workspaces View
+hide_title: true
+---
+
+import TierLabel from "./../_components/TierLabel";
+
+

+ {frontMatter.title} +

+ +## Workspaces List View + +From the side menu, you can click on the **Workspaces** tab to go to the workspaces list view. + +This view lists workspaces across all clusters. You can filter workspaces by their clusters or their names. + +![Workspaces List View](./imgs/list-workspaces-view.png) + +## Workspace Details View + +You can go to this view by clicking on the name of the workspace in the [Workspaces List View](#Workspaces-list-view). + +In this view you can see all details of the workspace such as its name, namespace, and all resources related to this workspace. + +![Workspaces Details View](./imgs/workspace-details-view.png) diff --git a/website/versioned_sidebars/version-0.24.0-sidebars.json b/website/versioned_sidebars/version-0.24.0-sidebars.json new file mode 100644 index 0000000000..07abb1d93d --- /dev/null +++ b/website/versioned_sidebars/version-0.24.0-sidebars.json @@ -0,0 +1,320 @@ +{ + "docs": [ + { + "type": "category", + "label": "Introducing GitOps", + "collapsed": false, + "link": { + "type": "doc", + "id": "intro" + }, + "items": [ + { + "type": "category", + "label": "Getting Started", + "collapsed": false, + "link": { + "type": "doc", + "id": "getting-started/intro" + }, + "items": [ + { + "type": "category", + "label": "0. Install Weave GitOps", + "collapsed": true, + "link": { + "type": "doc", + "id": "installation/index" + }, + "items": [ + "installation/weave-gitops", + { + "type": "category", + "label": "Weave GitOps Enterprise", + "link": { + "type": "doc", + "id": "installation/weave-gitops-enterprise/index" + }, + "items": [ + "installation/weave-gitops-enterprise/airgap" + ] + }, + "installation/aws-marketplace" + ] + }, + "getting-started/ui", + "getting-started/deploy" + ] + } + ] + }, + { + "type": "category", + "label": "Enterprise Edition", + "link": { + "type": "doc", + "id": "intro-ee" + }, + "items": [ + "releases" + ] + }, + { + "type": "category", + "label": "Access Configuration", + "items": [ + "configuration/recommended-rbac-configuration", + { + "type": "category", + "label": "Securing Access to the Dashboard", + "collapsed": false, + "link": { + "type": "doc", + "id": "configuration/securing-access-to-the-dashboard" + }, + "items": [ + "configuration/oidc-access", + "configuration/emergency-user" + ] + }, + "configuration/service-account-permissions", + "configuration/user-permissions", + "configuration/tls" + ] + }, + { + "type": "category", + "label": "Guides", + "items": [ + "guides/setting-up-dex", + "guides/cert-manager", + "guides/displaying-custom-metadata", + "guides/deploying-capa", + "guides/using-terraform-templates", + "guides/delivery", + "guides/flagger-manual-gating" + ] + }, + { + "type": "category", + "label": "GitOps Run", + "link": { + "type": "doc", + "id": "gitops-run/overview" + }, + "items": [ + "gitops-run/get-started" + ] + }, + { + "type": "category", + "label": "Cluster Management", + "link": { + "type": "doc", + "id": "cluster-management/intro" + }, + "items": [ + "cluster-management/getting-started", + "cluster-management/cluster-api-providers", + "cluster-management/managing-existing-clusters", + "cluster-management/provider-identities", + "cluster-management/deleting-a-cluster", + "cluster-management/profiles", + "cluster-management/add-applications", + "cluster-management/gitrepo-selection", + "cluster-management/disable-capi" + ] + }, + { + "type": "category", + "label": "Explorer", + "link": { + "type": "doc", + "id": "explorer/intro" + }, + "items": [ + "explorer/getting-started", + 
"explorer/configuration", + "explorer/querying", + "explorer/operations" + ] + }, + { + "type": "category", + "label": "Terraform", + "items": [ + "terraform/overview", + "terraform/get-started", + { + "type": "category", + "label": "Using Terraform CR", + "items": [ + "terraform/using-terraform-cr/provision-resources-and-auto-approve", + "terraform/using-terraform-cr/plan-and-manually-apply-terraform-resources", + "terraform/using-terraform-cr/provision-resources-and-write-output-data", + "terraform/using-terraform-cr/detect-drifts-only-without-plan-or-apply", + "terraform/using-terraform-cr/drift-detection-disabled", + "terraform/using-terraform-cr/set-variables-for-trraform-resources", + "terraform/using-terraform-cr/custom-backend", + "terraform/using-terraform-cr/depends-on", + "terraform/using-terraform-cr/modules", + "terraform/using-terraform-cr/customize-runner" + ] + }, + "terraform/backup-and-restore", + "terraform/oci-artifact", + "terraform/aws-eks", + "terraform/terraform-enterprise", + "terraform/tfctl", + "terraform/environment-variables" + ] + }, + { + "type": "category", + "label": "Pipelines", + "link": { + "type": "doc", + "id": "pipelines/intro" + }, + "items": [ + "pipelines/getting-started", + "pipelines/authorization", + "pipelines/promoting-applications", + "pipelines/pipeline-templates", + "pipelines/pipelines-with-jenkins", + "pipelines/pipelines-with-tekton", + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "category", + "label": "v1alpha1", + "items": [ + "pipelines/spec/v1alpha1/pipeline" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Workspaces", + "link": { + "type": "doc", + "id": "workspaces/intro" + }, + "items": [ + "workspaces/multi-tenancy", + "workspaces/view-workspaces" + ] + }, + { + "type": "category", + "label": "Policy", + "link": { + "type": "doc", + "id": "policy/intro" + }, + "items": [ + "policy/getting-started", + "policy/authorization", + "policy/policy", + "policy/weave-policy-profile", + "policy/policy-set", + "policy/policy-configuration", + "policy/releases", + "policy/commit-time-checks" + ] + }, + { + "type": "category", + "label": "Secrets", + "link": { + "type": "doc", + "id": "secrets/intro" + }, + "items": [ + "secrets/intro", + "secrets/getting-started", + "secrets/bootstraping-secrets", + "secrets/setup-eso", + "secrets/setup-sops", + "secrets/manage-secrets-ui", + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "category", + "label": "v1alpha1", + "items": [ + "secrets/spec/v1alpha1/secretSync" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Templates", + "link": { + "type": "doc", + "id": "gitops-templates/intro" + }, + "items": [ + "gitops-templates/quickstart-templates", + { + "type": "category", + "label": "Creating Templates", + "link": { + "type": "doc", + "id": "gitops-templates/creating-templates" + }, + "items": [ + "gitops-templates/resource-templates", + "gitops-templates/repo-rendered-paths", + "gitops-templates/profiles", + "gitops-templates/annotations", + "gitops-templates/params", + "gitops-templates/supported-langs", + "gitops-templates/create-cluster-example", + "gitops-templates/capd-example" + ] + }, + "gitops-templates/cli", + "gitops-templates/versions" + ] + }, + { + "type": "category", + "label": "GitOpsSets", + "items": [ + "gitopssets/intro", + "gitopssets/installation", + "gitopssets/guide", + "gitopssets/api-reference", + "gitopssets/releases" + ] + } + ], + "ref": [ + { + "type": "doc", + "label": "OSS Helm 
Reference", + "id": "references/helm-reference" + }, + { + "type": "category", + "label": "CLI Reference", + "items": [ + { + "type": "autogenerated", + "dirName": "references/cli-reference" + } + ] + } + ] +} diff --git a/website/versions.json b/website/versions.json index 8b9676e07d..fd6b4898dd 100644 --- a/website/versions.json +++ b/website/versions.json @@ -1,4 +1,5 @@ [ + "0.24.0", "0.23.0", "0.22.0", "0.21.2",