diff --git a/_data/home-content.yml b/_data/home-content.yml index c2155b4f8..f78bc0a8c 100644 --- a/_data/home-content.yml +++ b/_data/home-content.yml @@ -1,5 +1,4 @@ - - + - title: Getting started icon: images/home-icons/started.svg @@ -16,6 +15,63 @@ - title: Concepts in Codefresh localurl: /docs/getting-started/concepts/ +- title: Pipeline integrations + icon: images/home-icons/cloud-integrations.png + links: + - title: Hosted GitOps + localurl: /docs/integrations/codefresh-hosted-gitops/ + - title: Git Providers + localurl: /docs/integrations/git-providers/ + - title: Kubernetes + localurl: /docs/integrations/kubernetes/ + - title: Amazon Web Services + localurl: /docs/integrations/amazon-web-services/ + - title: Microsoft Azure + localurl: /docs/integrations/microsoft-azure/ + - title: Google Cloud + localurl: /docs/integrations/google-cloud/ + - title: Docker Registries + localurl: /docs/integrations/docker-registries/ + - title: Secret Storage + localurl: /docs/integrations/secret-storage/ + - title: Helm + localurl: /docs/integrations/helm/ + - title: Argo CD + localurl: /docs/integrations/argocd/ + - title: Datadog + localurl: /docs/integrations/datadog/ + - title: Jenkins integration/migration + localurl: /docs/integrations/jenkins-integration/ + - title: Codefresh API + localurl: /docs/integrations/codefresh-api/ + + + +- title: GitOps integrations + icon: images/home-icons/integrations.svg + url: '' + links: + - title: Image enrichment with GitOps integrations + localurl: /docs/gitops-integrations/image-enrichment-overview/ + - title: Codefresh Classic for GitOps + localurl: /docs/gitops-integrations/ci-integrations/codefresh-classic/ + - title: GitHub Actions for GitOps + localurl: /docs/gitops-integrations/ci-integrations/github-actions/ + - title: Jenkins for GitOps + localurl: /docs/gitops-integrations/ci-integrations/jenkins/ + - title: Jira for GitOps + localurl: /docs/gitops-integrations/issue-tracking/jira/ + - title: Amazon ECR for GitOps + localurl: /docs/gitops-integrations/container-registries/amazon-ecr/ + - title: Docker Hub Registry for GitOps + localurl: /docs/gitops-integrations/container-registries/dockerhub/ + - title: GitHub Container Registry for GitOps + localurl: /docs/gitops-integrations/container-registries/github-cr/ + - title: JFrog Artifactory for GitOps + localurl: /docs/gitops-integrations/container-registries/jfrog/ + - title: Quay Registry for GitOps + localurl: /docs/gitops-integrations/container-registries/quay/ + - title: Dashboards & insights icon: images/home-icons/guides.png url: '' @@ -31,30 +87,30 @@ icon: images/home-icons/guides.png url: '' links: - - title: Building your app - localurl: /docs/ci-cd-guides/packaging-compilation/ - - title: Building Docker images - localurl: /docs/ci-cd-guides/building-docker-images/ - - title: Accessing a Docker registry from Kubernetes cluster - localurl: /docs/ci-cd-guides/access-docker-registry-from-kubernetes/ - - title: Working with Docker registries - localurl: /docs/ci-cd-guides/working-with-docker-registries/ - - title: Adding config maps to namespaces - localurl: /docs/ci-cd-guides/add-config-maps-to-your-namespaces/ - - title: Pull Requests and branches - localurl: /docs/ci-cd-guides/pull-request-branches/ - - title: Pipelines for microservices - localurl: /docs/ci-cd-guides/microservices/ - - title: Deploying to predefined environments - localurl: /docs/ci-cd-guides/environment-deployments/ - - title: Previewing dynamic environments - localurl: /docs/ci-cd-guides/preview-environments/ - - 
title: Helm best practices - localurl: /docs/ci-cd-guides/helm-best-practices/ - - title: Templating for Kubernetes - localurl: /docs/ci-cd-guides/kubernetes-templating/ - - title: Progressive Delivery - localurl: /docs/ci-cd-guides/progressive-delivery/ + - title: Building your app + localurl: /docs/ci-cd-guides/packaging-compilation/ + - title: Building Docker images + localurl: /docs/ci-cd-guides/building-docker-images/ + - title: Accessing a Docker registry from Kubernetes cluster + localurl: /docs/ci-cd-guides/access-docker-registry-from-kubernetes/ + - title: Working with Docker registries + localurl: /docs/ci-cd-guides/working-with-docker-registries/ + - title: Adding config maps to namespaces + localurl: /docs/ci-cd-guides/add-config-maps-to-your-namespaces/ + - title: Pull Requests and branches + localurl: /docs/ci-cd-guides/pull-request-branches/ + - title: Pipelines for microservices + localurl: /docs/ci-cd-guides/microservices/ + - title: Deploying to predefined environments + localurl: /docs/ci-cd-guides/environment-deployments/ + - title: Previewing dynamic environments + localurl: /docs/ci-cd-guides/preview-environments/ + - title: Helm best practices + localurl: /docs/ci-cd-guides/helm-best-practices/ + - title: Templating for Kubernetes + localurl: /docs/ci-cd-guides/kubernetes-templating/ + - title: Progressive Delivery + localurl: /docs/ci-cd-guides/progressive-delivery/ @@ -127,21 +183,16 @@ - title: Nested workflows localurl: /docs/workflows/nested-workflows/ - title: Configure artifact repository + localurl: /docs/pipelines/configure-artifact-repository/ - localurl: /docs/workflows/configure-artifact-repository/ - title: Selectors for concurrency synchronization + localurl: /docs/pipelines/concurrency-limit/ - localurl: /docs/workflows/concurrency-limit/ - title: Sharing file systems + localurl: /docs/pipelines/sharing-file-system/ - localurl: /docs/workflows/sharing-file-system/ - - -- title: Clients - icon: images/home-icons/client.svg - url: '' - links: - - title: Codefresh CLI - localurl: /docs/clients/csdp-cli/ - title: Installation @@ -172,24 +223,24 @@ icon: images/home-icons/administration.svg url: '' links: - - title: Create a Codefresh account - localurl: /docs/administration/account-user-management/create-codefresh-account/ - - title: Adding users and teams - localurl: /docs/administration/account-user-management/add-users/ - - title: Single Sign-On - localurl: /docs/single-sign-on/ - - title: Set up OAuth2 authentication for Git providers - localurl: /docs/administration/account-user-management/oauth-setup/ - - title: Access control - localurl: /docs/administration/account-user-management/access-control/ - - title: Audit - localurl: /docs/administration/account-user-management/audit/ - - title: Codefresh IP addresses - localurl: /docs/administration/account-user-management/platform-ip-addresses/ - - title: User settings - localurl: /docs/administration/user-self-management/user-settings/ - - title: Manage Git PATs - localurl: /docs/administration/user-self-management/manage-pats/ + - title: Create a Codefresh account + localurl: /docs/administration/account-user-management/create-codefresh-account/ + - title: Adding users and teams + localurl: /docs/administration/account-user-management/add-users/ + - title: Single Sign-On + localurl: /docs/single-sign-on/ + - title: Set up OAuth2 authentication for Git providers + localurl: /docs/administration/account-user-management/oauth-setup/ + - title: Access control + localurl: 
/docs/administration/account-user-management/access-control/ + - title: Audit + localurl: /docs/administration/account-user-management/audit/ + - title: Codefresh IP addresses + localurl: /docs/administration/account-user-management/platform-ip-addresses/ + - title: User settings + localurl: /docs/administration/user-self-management/user-settings/ + - title: Manage Git PATs + localurl: /docs/administration/user-self-management/manage-pats/ - title: Reference diff --git a/_data/nav.yml b/_data/nav.yml index a4d781667..7f769e9c6 100644 --- a/_data/nav.yml +++ b/_data/nav.yml @@ -1,6 +1,3 @@ - - - - title: Getting started url: "/getting-started" pages: @@ -302,6 +299,112 @@ url: "/what-is-the-codefresh-yaml" +- title: Pipeline integrations + url: "/integrations" + pages: + - title: Codefresh Hosted GitOps + url: "/codefresh-hosted-gitops" + - title: Git Providers + url: "/git-providers" + - title: Kubernetes + url: "/kubernetes" + - title: Amazon Services + url: "/amazon-web-services" + - title: Microsoft Azure + url: "/microsoft-azure" + - title: Google Cloud + url: "/google-cloud" + - title: Docker registries + url: "/docker-registries" + sub-pages: + - title: Docker Hub + url: "/docker-hub" + - title: Azure Docker Registry + url: "/azure-docker-registry" + - title: Amazon EC2 Container Registry + url: "/amazon-ec2-container-registry" + - title: Google Container Registry + url: "/google-container-registry" + - title: Google Artifact Registry + url: "/google-artifact-registry" + - title: JFrog Bintray.io/Artifactory + url: "/bintray-io" + - title: Quay.io + url: "/quay-io" + - title: GitHub Container Registry + url: "/github-container-registry" + - title: DigitalOcean Container Registry + url: "/digital-ocean-container-registry" + - title: Other Registries + url: "/other-registries" + - title: Secret Storage + url: "/secret-storage" + - title: Hashicorp Vault + url: "/hashicorp-vault" + - title: Helm Integration + url: "/helm" + - title: ArgoCD Integration + url: "/argocd" + - title: Datadog Integration + url: "/datadog" + - title: Jira Integration + url: "/jira" + - title: Jenkins Integration + url: "/jenkins-integration" + - title: Codecov Integration + url: "/codecov-integration" + - title: Google Cloud builder + url: "/gcloud-builder" + - title: Google Marketplace Installation + url: "/google-marketplace" + - title: GitHub Actions + url: "/github-actions" + - title: Notifications + url: "/notifications" + sub-pages: + - title: Slack + url: "/slack-integration" + - title: Jira + url: "/jira-integration" + - title: Codefresh API + url: "/codefresh-api" + + +- title: GitOps integrations + url: "/gitops-integrations" + pages: + - title: Image enrichment with GitOps integrations + url: "/image-enrichment-overview" + - title: GitOps CI integrations + url: "/ci-integrations" + sub-pages: + - title: Codefresh Classic + url: "/codefresh-classic" + - title: GitHub Actions + url: "/github-actions" + - title: Jenkins + url: "/jenkins" + - title: GitOps issue tracking integrations + url: "/issue-tracking" + sub-pages: + - title: Jira + url: "/jira" + - title: GitOps container registry integrations + url: "/container-registries" + sub-pages: + - title: Amazon ECR + url: "/amazon-ecr" + - title: Docker Hub Registry + url: "/dockerhub" + - title: GitHub Container Registry + url: "/github-cr" + - title: JFrog Artifactory + url: "/jfrog" + - title: Quay Registry + url: "/quay" + + + - title: Workflows url: "/workflows" pages: @@ -489,3 +592,11 @@ url: "/sla" + + + + + + + + diff --git 
a/_docs/administration/account-user-management/access-control.md b/_docs/administration/account-user-management/access-control.md index 854374cbe..d65e29c78 100644 --- a/_docs/administration/account-user-management/access-control.md +++ b/_docs/administration/account-user-management/access-control.md @@ -12,7 +12,7 @@ toc: true --- -Codefresh provides seral complementary ways for access control within an organization: +Codefresh provides several complementary ways for access control within an organization: * **Role-based access**: [Role-based access](#users-and-administrators) restricts access to parts of the Codefresh UI intended for account administrators. For example, only an account administrator should be able to change integrations with [git providers]({{site.baseurl}}/docs/integrations/git-providers/) and [cloud services]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/). @@ -51,10 +51,10 @@ The table below lists the functionality available for role-based access. |[External Helm repositories]({{site.baseurl}}/docs/new-helm/add-helm-repository/) | `Admin`| |[Cloud provider settings]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/) | `Admin`| |[Cloud storage settings]({{site.baseurl}}/docs/testing/test-reports/#connecting-your-storage-account) | `Admin`| -|[Shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) | `Admin`| +|[Shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) | `Admin`| |[API token generation]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions) | `Admin`| |[SSO Settings]({{site.baseurl}}/docs/administration/single-sign-on/) | `Admin`| -|[Runtime environment selection]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) | `Admin`| +|[Runtime environment selection]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-settings) | `Admin`| |[Slack settings]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) | `Admin`| |[Audit logs]({{site.baseurl}}/docs/administration/audit-logs/) | `Admin`| |ABAC for Kubernetes clusters | `Admin`| @@ -184,14 +184,14 @@ For pipelines: * `Update` - see and edit allowed pipelines only (including tagging them). * `Delete` - can delete allowed pipelines only. * `Run` - can run allowed pipelines only. -* `Approve` - resume pipelines that are waiting for manual [approval]({{site.baseurl}}/docs/pipelines/steps/approval/). -* `Debug` - allow the usage of the [pipeline debugger]({{site.baseurl}}/docs/pipelines/debugging-pipelines/). +* `Approve` - resume pipelines that are waiting for manual [approval]({{site.baseurl}}/docs/codefresh-yaml/steps/approval/). +* `Debug` - allow use of the [pipeline debugger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/debugging-pipelines/). ## Git-repository access restrictions -By default, users can load pipeline definitions when [creating a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/), from the inline editor, or any private or public Git repository. +By default, users can load pipeline definitions when [creating a pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/) from the inline editor or from any private or public Git repository. You can change the default behavior to restrict loading CI pipeline definitions to specific Git repositories, or to disable loading definitions from all Git repositories.
diff --git a/_docs/administration/user-self-management/user-settings.md b/_docs/administration/user-self-management/user-settings.md index 47743fb42..ad969ff3b 100644 --- a/_docs/administration/user-self-management/user-settings.md +++ b/_docs/administration/user-self-management/user-settings.md @@ -74,7 +74,7 @@ max-width="100%" When you connect your [Git provider]({{site.baseurl}}/docs/integrations/git-providers/) during sign-up, you may choose to let Codefresh access only your public Git repositories. -To allow Codefresh to also add [Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) on private repositories you need to explicitly enable it in this section. +To allow Codefresh to also add [Git triggers]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) on private repositories, you need to explicitly enable it in this section. Note that the available options depend largely on the Git provider you are using with Codefresh. --> diff --git a/_docs/ci-cd-guides/access-docker-registry-from-kubernetes.md b/_docs/ci-cd-guides/access-docker-registry-from-kubernetes.md deleted file mode 100644 index 42b04b6ca..000000000 --- a/_docs/ci-cd-guides/access-docker-registry-from-kubernetes.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: "Accessing Docker registry from Kubernetes cluster" -description: "Allow Kubernetes to pull Docker images from your registry" -group: ci-cd-guides -toc: true ---- -Kubernetes deployments are based on a "pull" approach. When you deploy your application to a Kubernetes -cluster, instead of uploading the application itself, as in traditional deployments, Kubernetes pulls the Docker images to its nodes on its own. - - {% include -image.html -lightbox="true" -file="/images/getting-started/quick-start-k8s/overview.png" -url="/images/getting-started/quick-start-k8s/overview.png" -alt="Kubernetes deployments" -caption="Kubernetes deployments" -max-width="80%" -%} -If your Docker images are in a public repository such as Docker Hub, Kubernetes can pull them right away. In most cases, however, your images are in a private Docker registry and Kubernetes must be given explicit access to it. -Use [Docker registry secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/){:target="\_blank"} to give Kubernetes access to private Docker registries. When there is a deployment, each Kubernetes pod can pull Docker images directly from the target registry. -## Giving access to a Docker Registry via the UI -Codefresh allows you to easily create pull secrets for your cluster. -1. In the Codefresh UI, set up an integration with your [Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/) in Codefresh. Codefresh can work with any compliant Docker registry, either in the cloud or behind the firewall. -1. To view the Kubernetes dashboard, from the Ops section in the sidebar, select [**Kubernetes Services**](https://g.codefresh.io/kubernetes/services/){:target="\_blank"}. -1. Click **Add Service**. -1. Do the following: - * Select your **Cluster** and **Namespace** from the respective lists. - * From the **Image Pull Secret** dropdown, which lists all the pull secrets for the selected namespace, select **Create Registry Pull secret**. - * From the list of all the connected Docker registries in Codefresh, select the registry you want. Codefresh automatically creates a secret for you.
- - {% include -image.html -lightbox="true" -file="/images/guides/kubernetes/create-secret.png" -url="/images/guides/kubernetes/create-secret.png" -alt="Create Pull Secret" -caption="Create Pull Secret" -max-width="80%" -%} - ->The secret is created as soon as you select your Docker registry from the dropdown. There is no need to actually deploy anything from this screen for the changes to take effect. - {% include -image.html -lightbox="true" -file="/images/guides/kubernetes/secret-dropdown.png" -url="/images/guides/kubernetes/secret-dropdown.png" -alt="Docker Registry Access" -caption="Docker Registry Access" -max-width="80%" -%} - -From now on, the cluster can pull Docker images from the selected registry in this namespace. -You don't need to finish the deployment for the secret to take effect; feel free to -close the screen and go to another Codefresh page. ->Codefresh automatically uses the secret you defined in all deployments that are performed via the UI by dynamically creating the correct manifests for you behind the scenes. If you wish to use your own manifests, you need to include the secret yourself, as explained in the next section. - -## Giving access to a Docker Registry with kubectl -You can also use the `kubectl` command directly to give access to a Docker registry. -As this method is not specific to Codefresh, read the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/){:target="\_blank"}. - -### Creating the Docker registry secret -The credentials depend on the [type of registry]({{site.baseurl}}/docs/integrations/docker-registries/) you use. - -- The Docker server to use is a domain such as `gcr.io` or `azurecr.io`. -- The username is your account username. -- The password is a specific Docker registry password or any other kind of token. Check the documentation of your registry provider for the exact details. ->Be sure to create the secret in the namespace in which your application will run. Pull secrets are specific to a namespace. If you want to deploy to multiple namespaces, you need to create a secret for each one of them. - -This is an example of creating a pull secret for the Azure registry. You can use the same command for any other private registry. - `Shell` {% highlight sh %} {% raw %} -export DOCKER_REGISTRY_SERVER=mysampleregistry.azurecr.io -export DOCKER_USER=myregistryname -export DOCKER_PASSWORD=myregistrytoken -export DOCKER_EMAIL=YOUR_EMAIL - -kubectl create secret docker-registry cfcr\ --docker-server=$DOCKER_REGISTRY_SERVER\ --docker-username=$DOCKER_USER\ --docker-password=$DOCKER_PASSWORD\ --docker-email=$DOCKER_EMAIL {% endraw %} {% endhighlight %} - ### Using the Docker registry secret - To use the secret you just created, include it in either: - * Your [pod manifests](https://kubernetes.io/docs/concepts/containers/#specifying-imagepullsecrets-on-a-pod){:target="\_blank"} * The [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account){:target="\_blank"} - For Docker registry secret usage, we recommend following the official Kubernetes documentation. - ## Giving access to a Docker Registry via the Codefresh CLI - The Codefresh CLI can also create pull secrets in an automated manner. - See [Image pull Secret](https://codefresh-io.github.io/cli/more/image-pull-secret/){:target="\_blank"}.
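As a reference, here is a minimal sketch of how the `cfcr` secret created above could be used in a pod manifest. The image name and namespace are placeholder values for illustration only:

`pod.yml`
{% highlight yaml %}
{% raw %}
apiVersion: v1
kind: Pod
metadata:
  name: my-app
  namespace: staging
spec:
  containers:
    - name: my-app
      # Placeholder image; any image hosted in your private registry works here
      image: mysampleregistry.azurecr.io/my-app:1.0
  imagePullSecrets:
    # Must match the secret name passed to "kubectl create secret docker-registry"
    - name: cfcr
{% endraw %}
{% endhighlight %}

Alternatively, you can attach the secret to the service account of the namespace, so that all pods in that namespace use it without declaring it explicitly:

`Shell`
{% highlight sh %}
{% raw %}
kubectl patch serviceaccount default -n staging \
  -p '{"imagePullSecrets": [{"name": "cfcr"}]}'
{% endraw %}
{% endhighlight %}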
- -## Related articles -[Deploy to Kubernetes - quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) -[Managing your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) - - diff --git a/_docs/ci-cd-guides/add-config-maps-to-your-namespaces.md b/_docs/ci-cd-guides/add-config-maps-to-your-namespaces.md deleted file mode 100644 index c525eba81..000000000 --- a/_docs/ci-cd-guides/add-config-maps-to-your-namespaces.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: "Adding config maps to namespaces" -description: "Manage Kubernetes Config Maps with Codefresh" -group: ci-cd-guides -toc: true ---- -Many applications require configuration with files, environment variables, and command line arguments. This makes applications portable and easily manageable. While this makes for easy configuration, it can become very hard to maintain large numbers of config files for different environments and hundreds of microservices. - -Kubernetes provides an elegant and convenient way to configure applications, using *configuration maps*. You can find more details about config maps at [https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/){:target="_blank"}. - -You can manage all your cluster configuration using Codefresh. - -## View existing config maps - -1. In the Codefresh UI, from the Ops section in the sidebar, select [**Kubernetes Services**](https://g.codefresh.io/kubernetes/services/){:target="\_blank"}. -1. Switch to list view. {% include -image.html -lightbox="true" -file="/images/guides/config-maps/change-view.png" -url="/images/guides/config-maps/change-view.png" -alt="Change View" -caption="Change View" -max-width="50%" -%} {:start="3"} 1. Select a namespace and hover over it. 1. Click the **Settings** icon that appears at the end of the row. A list of all config maps within this namespace is displayed, including the date of creation and the number of configuration variables inside each map. - - -## Add a new config map - -1. From the list of config maps, click **Create a New Config Map**. {% include image.html -lightbox="true" -file="/images/guides/config-maps/manage-maps-namespace.png" -url="/images/guides/config-maps/manage-maps-namespace.png" -alt="Create a new config map in namespace" -caption="Create a new config map in namespace" -max-width="40%" -%} {:start="2"} 1. In the Add a New Config Map form, enter a **Name**, add variables, as described in [Managing variables in your config maps](#managing-variables-in-config-maps), and then click **Create**. {% include image.html -lightbox="true" -file="/images/guides/config-maps/new-config-map-settings.png" -url="/images/guides/config-maps/new-config-map-settings.png" -alt="Define settings for new config map" -caption="Define settings for new config map" -max-width="40%" -%} ### Managing variables in config maps There are three ways to add variables to config maps: 1. Add a single variable at a time 1. Add multiple variables by copying and pasting from text or file 1. Import a set of variables from an existing config map - -#### Add a single variable to config map - -This is the easiest way to add a variable to the config map. This method is very useful for quickly creating a small config map with one or two variables: 1. Enter the key name and the key value 1. Click **Add Variable**.
- -{% include image.html -lightbox="true" -file="/images/guides/config-maps/add-new-single-variable.png" -url="/images/guides/config-maps/add-new-single-variable.png" -alt="Add single variable at a time to config map" -caption="Add single variable at a time to config map" -max-width="40%" -%} - -#### Import variables from text/file -If you already have configuration variables in a `*.property` file, you can easily import them into your config map. -**Import from text**: - -1. Click **Import from text**. -1. Copy the text from the file and paste it into the text area in the required format. -1. Click **Apply**. {% include image.html -lightbox="true" -file="/images/guides/config-maps/import-variables-from-text.png" -url="/images/guides/config-maps/import-variables-from-text.png" -alt="Add multiple variables from text or file to config map" -caption="Add multiple variables from text or file to config map" -max-width="40%" -%} -**Import from file**: -1. Click **Import from file**. -1. Select the file from your computer, and click **Open**. -#### Copy variables from existing config map -You can easily copy the variables from an existing config map, and use them in other namespaces. -1. Click **Copy from Existing Config Map**. -1. Select the **Cluster** and **Namespace** from which to copy the config map. -1. Select the config map from the list, and click **Select**. {% include image.html -lightbox="true" -file="/images/guides/config-maps/select-cluster-namespace.png" -url="/images/guides/config-maps/select-cluster-namespace.png" -alt="Copy variables from existing config map" -caption="Copy variables from existing config map" -max-width="40%" -%} ### Edit/remove variables in config maps -You can easily edit or remove variables in your config maps. -1. Select the config map with the variables to modify or remove. -1. Click the **Edit** (pencil) icon. -1. Add new variables, as described in [Managing variables in your config maps](#managing-variables-in-config-maps). {% include image.html -lightbox="true" -file="/images/guides/config-maps/edit-remove-config-map-variables.png" -url="/images/guides/config-maps/edit-remove-config-map-variables.png" -alt="Edit/remove variables in config maps" -caption="Edit/remove variables in config maps" -max-width="40%" -%} -To remove a config map, click the **Remove** icon in the selected row. After your confirmation, the config map is removed. -## Related articles -[Connect to your Kubernetes cluster]({{site.baseurl}}/docs/integrations/add-kubernetes-cluster/) -[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) -[Deploy to Kubernetes - quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) diff --git a/_docs/ci-cd-guides/building-docker-images.md b/_docs/ci-cd-guides/building-docker-images.md deleted file mode 100644 index 4c60597b0..000000000 --- a/_docs/ci-cd-guides/building-docker-images.md +++ /dev/null @@ -1,440 +0,0 @@ ---- -title: "Building Docker images" -description: "Create Docker images from Dockerfiles" -group: ci-cd-guides -toc: true ---- -Codefresh has first-class Docker build support. You can build Docker images in your pipeline in a declarative manner using the [build step]({{site.baseurl}}/docs/pipelines/steps/build/). ->If your application is not deployed as a Docker image, see the [basic compilation/packaging guide]({{site.baseurl}}/docs/ci-cd-guides/packaging-compilation/) instead.
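For reference, here is a minimal sketch of such a declarative build step; the image name and tag are placeholder values, and complete pipelines are shown later in this guide:

`codefresh.yml` (fragment)
{% highlight yaml %}
{% raw %}
  build_my_image:
    title: Building Docker Image
    type: build
    # Placeholder name for the resulting Docker image
    image_name: my-app-image
    tag: latest
    # Path to the Dockerfile, relative to the working directory
    dockerfile: Dockerfile
{% endraw %}
{% endhighlight %}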
Building a Dockerfile in a pipeline works the same way as building the Dockerfile locally on your workstation. -Your Dockerfile should be valid and follow all the best practices, such as: -* Dockerfiles should be self-contained -* You should not have actions with side effects inside Dockerfiles -* You should have a proper `.dockerignore` file to minimize the Docker context size -* Dockerfile directives should be placed according to best practices for caching - -For more details, see also the [Caching in pipelines]({{site.baseurl}}/docs/pipelines/pipeline-caching/#distributed-docker-layer-caching). At the very least, you should understand and use [Docker multistage builds](https://docs.docker.com/develop/develop-images/multistage-build/){:target="\_blank"} (although Codefresh supports all kinds of Dockerfiles natively). Basically, if your Dockerfile is already optimized on your local workstation, it should also be optimized for Codefresh. - -Codefresh uses the standard Docker daemon (or optionally Buildkit) behind the scenes, so if your Dockerfile has issues when you try to build it locally, it will have the same issues in a pipeline. - -## Docker packaging strategies - -There are many ways to create a Dockerfile, and most organizations typically follow a different path depending on the type of application they package. Brand-new applications are very easy to package into multistage Dockerfiles, while legacy/existing applications are adapted to Dockerfiles that package an existing artifact. - -We suggest spending the extra time to create multistage builds for all applications (even legacy ones). Explaining all the virtues of multistage Docker builds is outside the scope of this article, but in summary, multistage builds: - -1. Are self-contained and self-describing -1. Result in a very small Docker image -1. Can be easily built by all project stakeholders, even non-developers -1. Are very easy to understand and maintain -1. Do not require a development environment, apart from the source code itself -1. Can be packaged with very simple pipelines, not only in Codefresh, but in other CI systems as well - -Multi-stage builds are also essential in organizations that employ multiple programming languages. The ease with which anyone can build a Docker image, without needing a JDK/Node/Python/etc., cannot be overstated. - -## Production-ready Docker images with multistage builds - -If you have a multistage Dockerfile, then the respective pipeline in Codefresh is straightforward. You only need two pipeline steps: - -1. A clone step to check out the source code -1. 
A build step to create the Docker image - -For example, here is a [Java dockerfile]({{site.baseurl}}/docs/example-catalog/ci-examples/java/spring-boot-2/#spring-boot-2-and-docker-multi-stage-builds): - - `Dockerfile` -{% highlight docker %} -{% raw %} -FROM maven:3.5.2-jdk-8-alpine AS MAVEN_TOOL_CHAIN -COPY pom.xml /tmp/ -RUN mvn -B dependency:go-offline -f /tmp/pom.xml -s /usr/share/maven/ref/settings-docker.xml -COPY src /tmp/src/ -WORKDIR /tmp/ -RUN mvn -B -s /usr/share/maven/ref/settings-docker.xml package - -FROM java:8-jre-alpine - -EXPOSE 8080 - -RUN mkdir /app -COPY --from=MAVEN_TOOL_CHAIN /tmp/target/*.jar /app/spring-boot-application.jar - -ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] - -{% endraw %} -{% endhighlight %} - -The Codefresh pipeline that builds this Dockerfile is the following: - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: '1.0' -stages: - - prepare - - build -steps: - main_clone: - title: Cloning main repository... - stage: prepare - type: git-clone - repo: 'codefresh-contrib/spring-boot-2-sample-app' - revision: master - git: github - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: spring-boot-2-sample-app - working_directory: ./ - tag: 'multi-stage' - dockerfile: Dockerfile -{% endraw %} -{% endhighlight %} - -The beauty of this pipeline is that it is *exactly the same* for all multistage Dockerfiles regardless of the programming language that you use. So even if the Dockerfile was packaging a Node or Go application, the pipeline is oblivious to it. - -{% include image.html -lightbox="true" -file="/images/guides/build-docker-images/multi-stage-pipeline.png" -url="/images/guides/build-docker-images/multi-stage-pipeline.png" -alt="Multi-stage Docker builds" -caption="Multi-stage Docker builds" -max-width="100%" -%} - -You can find multistage build examples for other programming languages in the [example section]({{site.baseurl}}/docs/example-catalog/ci-examples/examples/). - - -## Creating self-contained Docker images - -Even though multistage Dockerfiles are the optimal way to build Docker images, Codefresh still supports "plain" Dockerfiles which do not have multiple stages. - -As an example, this Dockerfile for a Python application is created from a single parent image (although we use the slim variant to make the final image size smaller). - - `Dockerfile` -{% highlight docker %} -{% raw %} -FROM python:3.6-slim - -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 -RUN mkdir /code -WORKDIR /code -RUN pip install --upgrade pip -COPY requirements.txt /code/ - -RUN pip install -r requirements.txt -COPY . /code/ - -EXPOSE 8000 - -CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"] -{% endraw %} -{% endhighlight %} - - -This Dockerfile can be built in the same way as a multistage one. We still need two pipeline steps, one to check out the code and another to build the Docker image. - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: '1.0' -stages: - - prepare - - build -steps: - main_clone: - title: Cloning main repository... 
- stage: prepare - type: git-clone - repo: 'codefreshdemo/cf-example-python-django' - revision: master - git: github - build_my_image: - title: Building Docker Image - stage: build - type: build - image_name: my-django-image - working_directory: ./ - tag: master - dockerfile: Dockerfile -{% endraw %} -{% endhighlight %} - -The pipeline is similar to the previous one, so you can handle multistage and non-multistage builds in the same manner in Codefresh pipelines. - -{% include image.html -lightbox="true" -file="/images/guides/build-docker-images/non-multi-stage-pipeline.png" -url="/images/guides/build-docker-images/non-multi-stage-pipeline.png" -alt="Non Multi-stage Docker builds" -caption="Non Multi-stage Docker builds" -max-width="100%" -%} - -It is important however to note that the Dockerfile is still self-contained. It depends only on the source code of the application and all instructions needed to package the code are included in the Dockerfile itself. - - - -## Packaging existing artifacts in Docker images - -An alternative way to create Docker images is to just package an existing artifact or application which is created earlier in the CI process. - ->Though this is a very popular way to create Dockerfiles, and Codefresh supports it, we do **NOT** recommend writing Dockerfiles like this. Please learn about Docker multistage builds if you are not familiar with them. - -You can see this pattern in all kinds of Dockerfiles that assume the application is already there (or that dependencies are already downloaded). Here is a [Dockerfile that packages an existing JAR]({{site.baseurl}}/docs/example-catalog/ci-examples/java/spring-boot-2/#spring-boot-2-and-docker-package-only) file. - - `Dockerfile` -{% highlight docker %} -{% raw %} -FROM java:8-jre-alpine - -EXPOSE 8080 - -RUN mkdir /app -COPY target/*.jar /app/spring-boot-application.jar - -ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] - -HEALTHCHECK --interval=1m --timeout=3s CMD wget -q -T 3 -s http://localhost:8080/actuator/health/ || exit 1 -{% endraw %} -{% endhighlight %} - -If you have Dockerfiles like this you need to enrich the basic pipeline shown in the previous sections and run a freestyle step that prepares the artifact **BEFORE** building the Docker image. Read more about [freestyle steps in the basic CI process]({{site.baseurl}}/docs/ci-cd-guides/packaging-compilation/). - - -There are several disadvantages to these kinds of Dockerfiles: - -* The Dockerfile is not self-contained anymore. You need to manually run some other command before actually running the Docker build -* A person who wants to build the Docker image on their workstation is also forced to have a full dev environment (e.g. the JDK or Node.js) -* The version of a development tool is specified twice (one in the Dockerfile and one in the CI/CD system) - -Here is the Codefresh pipeline: - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: '1.0' -stages: - - prepare - - compile - - build -steps: - main_clone: - title: Cloning main repository... 
- stage: prepare - type: git-clone - repo: 'codefresh-contrib/spring-boot-2-sample-app' - revision: master - run_unit_tests: - title: Compile/Unit test - stage: compile - image: 'maven:3.5.2-jdk-8-alpine' - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: spring-boot-2-sample-app - working_directory: ./ - tag: 'non-multi-stage' - dockerfile: Dockerfile.only-package -{% endraw %} -{% endhighlight %} - -This pipeline has an intermediate [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that runs a specific version of Maven/JDK to create the JAR file. The JAR file is then available to the next step via [the Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). - -{% include image.html -lightbox="true" -file="/images/guides/build-docker-images/package-only-pipeline.png" -url="/images/guides/build-docker-images/package-only-pipeline.png" -alt="Package only Docker builds" -caption="Package only Docker builds" -max-width="100%" -%} - -In the example above, you can see that the version of JDK/JRE is mentioned twice (one in the pipeline and one in the Dockerfile). If developers decide to upgrade to Java 11, they need to change both places (and in big companies pipelines are usually managed by operators). If this was a multistage build then a developer could simply change just the Dockerfile and be certain that the pipeline is "upgraded" as well. - -We find that similar workflows are from legacy CI solutions that are VM-based. Codefresh is a container-native solution, so if you have the opportunity you should create your pipelines from scratch when switching to Docker-based pipelines. - - -## Avoiding non-standard Dockerfiles - -We already established in the previous section that Dockerfiles should be self-contained. Another best practice is to make sure that all actions inside a Dockerfile are idempotent. - -There are several Dockerfiles that attempt to mimic a CI/CD system and perform non-standard actions such as: - -* Performing Git commits or other Git actions -* Cleaning up or tampering with database data -* Calling other external services with POST/PUT operations - -Not only does this make the pipeline much more complex (because retrying the pipeline now has consequences), but you also need to pass special credentials in the Dockerfile itself via the pipeline, making the pipeline even more complicated. - -You should avoid these kinds of directives inside a Dockerfile and simplify it so that all actions inside it are repeatable and non-destructive. -A Dockerfile should mainly: - -* Clone extra source code (if needed) -* Download dependencies -* Compile/package code -* Process/Minify/Transform local resources -* Run scripts and edit files on the container filesystem only - -As an example **TO AVOID**, this Dockerfile is also trying to run a SonarQube analysis - -`Dockerfile` -{% highlight docker %} -{% raw %} -FROM newtmitch/sonar-scanner AS sonar -COPY src src -RUN sonar-scanner -FROM node:11 AS build -WORKDIR /usr/src/app -COPY . . 
-RUN yarn install && \ - yarn run lint && \ - yarn run build && \ - yarn run generate-docs {% endraw %} {% endhighlight %} - -This Dockerfile has the following issues: - -* It can run only where a SonarQube installation is available -* It needs extra credentials for the SonarQube instance -* If the SonarQube installation has issues, then the application build will also fail - -The proper way to build this Dockerfile is to make it package just the application: -`Dockerfile` -{% highlight docker %} -{% raw %} -FROM node:11 AS build -WORKDIR /usr/src/app -COPY . . -RUN yarn install && \ - yarn run lint && \ - yarn run build && \ - yarn run generate-docs -{% endraw %} -{% endhighlight %} - -And then move the SonarQube part to the actual pipeline: - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: '1.0' -stages: - - prepare - - sonar - - build -steps: - main_clone: - title: Cloning main repository... - stage: prepare - type: git-clone - repo: 'my-github-repo/my-node-app' - revision: master - run_sonarqube: - title: Run SonarQube Analysis - stage: sonar - image: 'newtmitch/sonar-scanner' - environment: - - SONAR_TOKEN=my-sonar-token - commands: - - cd src - - sonar-scanner - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: my-node-image - working_directory: ./ - tag: 'master' - dockerfile: Dockerfile -{% endraw %} -{% endhighlight %} - -This makes the Docker build step as simple as possible. - -For more Docker best practices, see our [Docker anti-patterns blog post](https://codefresh.io/containers/docker-anti-patterns/){:target="\_blank"}. - -## Pushing Docker images - -The build step in Codefresh is very smart and also automatically pushes your Docker image to your [default Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/#the-default-registry). - {% include image.html -lightbox="true" -file="/images/guides/build-docker-images/automatic-docker-push.png" -url="/images/guides/build-docker-images/automatic-docker-push.png" -alt="Automatic Docker push" -caption="Automatic Docker push" -max-width="80%" -%} - -Thus, if you run any of the above pipelines, you can see the image created in the Docker image dashboard. - {% include image.html -lightbox="true" -file="/images/guides/build-docker-images/docker-image-dashboard.png" -url="/images/guides/build-docker-images/docker-image-dashboard.png" -alt="Docker image dashboard" -caption="Docker image dashboard" -max-width="80%" -%} - -For more details on how to push Docker images, see the [working with Docker registries page]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/). - -## Running Docker images - -You can run Docker images inside a Codefresh pipeline using freestyle steps. You can use the freestyle step to run either an existing image from a private or public registry or even a Docker image that was created in the pipeline itself. - -This is a [very common pattern in Codefresh]({{site.baseurl}}/docs/pipelines/steps/freestyle/#dynamic-freestyle-steps) and works by simply mentioning the name of the build step that created the image. - -`codefresh.yml` -{% highlight yaml %} -{% raw %} -version: '1.0' -steps: - main_clone: - title: Cloning main repository... 
- stage: prepare - type: git-clone - repo: 'my-github-repo/my-helper-project' - revision: master - my_testing_tools: - title: Building Docker Image - type: build - image_name: my-own-testing-framework - run_tests: - title: Running Unit tests - image: ${{my_testing_tools}} - commands: - - ./my-unit-tests.sh -{% endraw %} -{% endhighlight %} - -For more details, see [dynamic build tools]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#creating-docker-images-dynamically-as-build-tools) and [context variables]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables). - -## Related articles -[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) -[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) -[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) -[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) - diff --git a/_docs/ci-cd-guides/environment-deployments.md b/_docs/ci-cd-guides/environment-deployments.md deleted file mode 100644 index bc845d97d..000000000 --- a/_docs/ci-cd-guides/environment-deployments.md +++ /dev/null @@ -1,690 +0,0 @@ ---- -title: "Deploying to predefined environments" -description: "Deploy to different production and staging environments from Codefresh pipelines" -group: ci-cd-guides -toc: true ---- -With Codefresh, you can deploy a single application to multiple environments, such as QA, staging, and production, and manage all of them with one or more pipelines. This guide describes how an example application can be deployed with different configurations and various workflows for handling environment deployment. {% include image.html -lightbox="true" -file="/images/guides/promotion/image-promotion.png" -url="/images/guides/promotion/image-promotion.png" -alt="Using multiple environments" -caption="Using multiple environments" -max-width="80%" -%} - -## Prerequisites - -Before starting, you will need to: - 1. [Create a Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) - 1. Get access to a Kubernetes cluster on any cloud provider - 1. [Connect the Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) to your account - 1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/){:target="\_blank"} and [helm](https://helm.sh/docs/intro/install/){:target="\_blank"} and point them to your cluster - 1. Have [Docker](https://docs.docker.com/get-docker/){:target="\_blank"} installed locally (optional) - -## Example application - -As a running example, we will use a simple application with a Helm chart. [Helm is the package manager]({{site.baseurl}}/docs/deployments/helm/helm-best-practices/) for Kubernetes and has built-in support for passing different configuration settings for each environment. - -You can find the example Helm application at [https://github.com/codefresh-contrib/helm-promotion-sample-app](https://github.com/codefresh-contrib/helm-promotion-sample-app){:target="\_blank"}. If you want to follow along, feel free to fork it to your own account. - -The application is a web page that prints out its own configuration as loaded from `/config/settings.ini`. -You can run the application locally on your own workstation with: - -``` -git clone https://github.com/codefresh-contrib/helm-promotion-sample-app.git -cd helm-promotion-sample-app -docker build . -t my-app
-docker run -p 8080:8080 my-app -``` - -and then visit `http://localhost:8080` in your browser. - -In this example, we use a settings file in the [INI format](https://en.wikipedia.org/wiki/INI_file){:target="\_blank"}, but the same principles apply to other configuration methods, such as env files, Java properties, or YAML/JSON configurations. - -### Different environment configurations - -The application includes a [Helm chart](https://github.com/codefresh-contrib/helm-promotion-sample-app/tree/master/chart/sample-app){:target="\_blank"} that contains values for three different environments: - -* [values-qa.yaml](https://github.com/codefresh-contrib/helm-promotion-sample-app/blob/master/chart/values-qa.yaml){:target="\_blank"} for the "QA" environment -* [values-staging.yaml](https://github.com/codefresh-contrib/helm-promotion-sample-app/blob/master/chart/values-staging.yaml){:target="\_blank"} for the "Staging" environment -* [values-prod.yaml](https://github.com/codefresh-contrib/helm-promotion-sample-app/blob/master/chart/values-prod.yaml){:target="\_blank"} for the "Production" environment - -The values contained in the files apply both to the application (e.g., the payment service URL) and to the infrastructure level (the number of replicas inside the cluster). Note that the values for the application are dummy values that are not actually used by the application (they are simply shown in the web page). The number of replicas will take real effect on the cluster (the production configuration defines 2 replicas instead of 1). - ->For simplicity, the chart of the application is hosted in the same Git repository as the source code. As an alternative, you could also have a second Git repository with just the chart. Codefresh supports both ways. - -### Manual deployment to different environments - -First, let's run the application manually in all three environments. Later we will automate the whole process with Codefresh pipelines. We will create each environment as a namespace in the cluster: - -``` -kubectl create namespace qa -kubectl create namespace staging -kubectl create namespace production -``` - -Then we will install a copy of the application in each environment with the respective values: - -``` -git clone https://github.com/codefresh-contrib/helm-promotion-sample-app.git -cd helm-promotion-sample-app/chart -helm install example-qa sample-app -n qa -f values-qa.yaml -helm install example-staging sample-app -n staging -f values-staging.yaml -helm install example-prod sample-app -n production -f values-prod.yaml -``` - -At this point, all three copies of the application should be up. You might need to wait some time until all the load balancers are up. You can see the running URLs with: - -``` -kubectl get service -A -``` - -If you visit the URL of each service in your browser, you will see how the application looks in each environment. {% include image.html -lightbox="true" -file="/images/guides/promotion/different-settings.png" -url="/images/guides/promotion/different-settings.png" -alt="Settings per environment" -caption="Settings per environment" -max-width="50%" -%} - -Note that the application uses a [Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/){:target="\_blank"}, which means extra costs on your cloud provider.
When you are ready to clean up the application, run the following: - -``` -helm uninstall example-staging -n staging -helm uninstall example-prod -n production -helm uninstall example-qa -n qa -``` - -Note that for this guide, all three environments run on the same cluster. In a real application, you should use a separate cluster for production, and never mix production and non-production workloads. Also notice that the chart refers to the `latest` tag of the application container, which is **NOT** a recommended practice. In a real application, the chart should specify a specific, versioned tag. - -## Basic deployment pipeline for different environments - -Now that we have seen how manual deployment works, let's automate the whole process with Codefresh. We [will create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) that: - -1. Deploys all commits to the `master` branch in the production environment -1. Deploys all other commits to the staging environment - -Here is what a commit to `master` looks like: - {% include image.html -lightbox="true" -file="/images/guides/promotion/production-deployment.png" -url="/images/guides/promotion/production-deployment.png" -alt="Production deployment" -caption="Production deployment" -max-width="80%" -%} - -This is a very simple workflow, perfect for small teams that follow Continuous Deployment. You can use the same pattern in other workflows such as [trunk based development]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#trunk-based-development). - -The pipeline has the following steps: - -1. A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to get the source code plus the Helm chart -1. A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create and push the container image to Docker Hub -1. A [Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) to perform the deployment. The step has [pipeline conditions]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) to select which environment will be used, as shown in the fragment below. 
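For example, this is the condition, taken from the full pipeline below, that limits the production deployment to the `master` branch:

{% highlight yaml %}
{% raw %}
    # Run this step only for commits on the master branch
    when:
      branch:
        only:
          - master
{% endraw %}
{% endhighlight %}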
- -Here is the full pipeline: - -`codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - "clone" - - "build" - - "deployment" - -steps: - clone: - title: "Cloning repository" - type: "git-clone" - repo: "codefresh-contrib/helm-promotion-sample-app" - revision: '${{CF_REVISION}}' - stage: "clone" - - build: - title: "Building Docker image" - type: "build" - image_name: "kostiscodefresh/helm-promotion-app" - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: "Dockerfile" - stage: "build" - registry: dockerhub - deployStaging: - title: Deploying to Staging - type: helm - stage: deployment - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-staging - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: staging - custom_value_files: - - ./chart/values-staging.yaml - when: - branch: - ignore: - - master - deployProd: - title: Deploying to Production - type: helm - stage: deployment - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-prod - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: production - custom_value_files: - - ./chart/values-prod.yaml - when: - branch: - only: - - master -{% endraw %} -{% endhighlight %} - -To test the pipeline and see how it behaves with different environments: - -1. Fork the [Git repository](https://github.com/codefresh-contrib/helm-promotion-sample-app){:target="\_blank"} to your own GitHub account -1. Commit a dummy change in the `master` branch, and you will see a deployment to the production namespace -1. Commit a dummy change to the `staging` branch or any other branch of your choosing, and you will see a deployment to the staging namespace. - -Here is how the pipeline looks when a commit happens to a branch that is not `master`: - -{% include image.html -lightbox="true" -file="/images/guides/promotion/non-production-deployment.png" -url="/images/guides/promotion/non-production-deployment.png" -alt="Staging deployment" -caption="Staging deployment" -max-width="80%" -%} - -As you can see the step that deploys to production is now skipped, and the step that deploys to staging is enabled. - -This is a great starting point for your own workflows. Codefresh can handle more complicated scenarios as you will see in the later sections. - ->Note that for brevity reasons, the pipeline deploys the Helm chart directly from the Git repo. In an actual pipeline, you [should also store the Helm chart -in a Helm repository]({{site.baseurl}}/docs/ci-cd-guides/helm-best-practices/#packagepush-and-then-deploy). - -For more details on Helm deployments see our [dedicated Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/). - -## Viewing your Helm Releases - -The previous pipeline works great as an automation mechanism. Wouldn't it be great if you could also *visualize* your deployments? -Codefresh includes a [Helm release dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) to help you understand your deployments. - -1. In the Codefresh UI, from the Ops section in the sidebar, select [Helm Releases](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. 
{% include image.html -lightbox="true" -file="/images/guides/promotion/helm-releases.png" -url="/images/guides/promotion/helm-releases.png" -alt="Helm releases" -caption="Helm releases" -max-width="80%" -%} - -{:start="2"} -1. To get extra information such as the services exposed and active replicas for a release, click the release. - {% include image.html -lightbox="true" -file="/images/guides/promotion/services.png" -url="/images/guides/promotion/services.png" -alt="Helm service information" -caption="Helm service information" -max-width="80%" -%} - In the History tab, you can view the deployment history, and even [roll back]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/#rolling-back-a-helm-release) to a previous release: - {% include image.html -lightbox="true" -file="/images/guides/promotion/history.png" -url="/images/guides/promotion/history.png" -alt="Helm deployment history" -caption="Helm deployment history" -max-width="80%" -%} - And most importantly, the Values tab shows the values applied for each release. This way you can also verify that the correct values are applied to the respective environment. - {% include image.html -lightbox="true" -file="/images/guides/promotion/helm-values.png" -url="/images/guides/promotion/helm-values.png" -alt="Helm values used" -caption="Helm values used" -max-width="80%" -%} - ## Using the Environment dashboard -Codefresh also includes an optional [environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) that you can use to track your environments and their current status. The dashboard is especially helpful if you have a large number of environments. {% include -image.html -lightbox="true" -file="/images/guides/environments/environments.png" -url="/images/guides/environments/environments.png" -alt="Codefresh Environment Dashboard" -caption="Codefresh Environment Dashboard" -max-width="70%" -%} - -To activate your environment dashboard, you need to add an [env block]({{site.baseurl}}/docs/pipelines/deployment-environments/) to each of the deployment steps in the pipeline. 
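As a quick reference, this is the `env` block attached to the production deployment step in the full pipeline below:

{% highlight yaml %}
{% raw %}
    env:
      name: Acme Production
      endpoints:
        - name: app
          url: https://production.example.com
      type: helm-release
      change: ${{CF_COMMIT_MESSAGE}}
      filters:
        - cluster: 'mydemoAkscluster@BizSpark Plus'
          releaseName: example-prod
{% endraw %}
{% endhighlight %}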
-Here is the whole pipeline: - - -`codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - "clone" - - "build" - - "deployment" - -steps: - clone: - title: "Cloning repository" - type: "git-clone" - repo: "codefresh-contrib/helm-promotion-sample-app" - revision: '${{CF_REVISION}}' - stage: "clone" - - build: - title: "Building Docker image" - type: "build" - image_name: "kostiscodefresh/helm-promotion-app" - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: "Dockerfile" - stage: "build" - registry: dockerhub - deployStaging: - title: Deploying to Staging - type: helm - stage: deployment - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-staging - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: staging - custom_value_files: - - ./chart/values-staging.yaml - when: - branch: - ignore: - - master - env: - name: Acme Staging - endpoints: - - name: app - url: https://staging.example.com - type: helm-release - change: ${{CF_COMMIT_MESSAGE}} - filters: - - cluster: 'mydemoAkscluster@BizSpark Plus' - releaseName: example-staging - deployProd: - title: Deploying to Production - type: helm - stage: deployment - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-prod - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: production - custom_value_files: - - ./chart/values-prod.yaml - when: - branch: - only: - - master - env: - name: Acme Production - endpoints: - - name: app - url: https://production.example.com - type: helm-release - change: ${{CF_COMMIT_MESSAGE}} - filters: - - cluster: 'mydemoAkscluster@BizSpark Plus' - releaseName: example-prod -{% endraw %} -{% endhighlight %} - - -Notice that we use the `CF_COMMIT_MESSAGE` [variable]({{site.baseurl}}/docs/pipelines/variables/) to annotate each environment with each build message. After you deploy at least once to each environment, you should see the following in your [Environment dashboard](https://g.codefresh.io/environments). - -{% include image.html -lightbox="true" -file="/images/guides/promotion/deployment-dashboard.png" -url="/images/guides/promotion/deployment-dashboard.png" -alt="Environment inspection" -caption="Environment inspection" -max-width="80%" -%} - -Just by looking at the builds of each environment, it is clear that the staging environment is one commit ahead (for feature 4689). -Clicking an environment shows several details such as active services, deployment history, rollback options, manifests rendered etc as in the Helm releases page. - -## Using Approvals in a pipeline - -Deploying straight to production after a commit is a worthy goal, but not all organizations want to work like this. In several cases, a human must approve a production deployment with a manual step. - -An alternative pipeline pattern is to have a single pipeline that automatically deploys to the "staging" environment but pauses before releasing to production. 
- -{% include image.html -lightbox="true" -file="/images/guides/promotion/with-approval.png" -url="/images/guides/promotion/with-approval.png" -alt="Asking for approval before a production deployment" -caption="Asking for approval before a production deployment" -max-width="80%" -%} - -Once the pipeline is paused, all project stakeholders can examine the state of the application in the staging environment (either manually or by running automated tests), and if everything looks good, promote the application to production. - -This is easily accomplished through the [Codefresh approval step]({{site.baseurl}}/docs/pipelines/steps/approval/). The pipeline is stopped, and a yes/no button is shown in the UI. The pipeline can continue only if approved by selecting `yes`. - -Here is the whole pipeline: - -`codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - "clone" - - "build" - - "staging" - - "prod" - -steps: - clone: - title: "Cloning repository" - type: "git-clone" - repo: "codefresh-contrib/helm-promotion-sample-app" - revision: '${{CF_REVISION}}' - stage: "clone" - - build: - title: "Building Docker image" - type: "build" - image_name: "kostiscodefresh/helm-promotion-app" - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: "Dockerfile" - stage: "build" - registry: dockerhub - deployStaging: - title: Deploying to Staging - type: helm - stage: staging - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-staging - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: staging - custom_value_files: - - ./chart/values-staging.yaml - askForPermission: - type: pending-approval - stage: prod - title: Deploy to production? - deployProd: - title: Deploying to Production - type: helm - stage: prod - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-prod - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: production - custom_value_files: - - ./chart/values-prod.yaml -{% endraw %} -{% endhighlight %} - -The approval step has many more options such as a timeout or even choosing a different flow in the pipeline if the approval is declined. - -## Using multiple pipelines for deployments - -Having a single pipeline that deals with all deployment environments can work great with a small team. As an organization grows, and more steps are added to the pipeline, it becomes very hard to use conditions to enable/disable specific steps in pipelines. - -With Codefresh, you can create as many pipelines as you want for a single project. It is therefore very easy to employ different simple pipelines for specific purposes, instead of working with a complex monolithic pipeline. - -In our example we will create two pipelines: - -1. The "staging" pipeline performs linting and security scans in the source code before creating the Docker image -1. 
The "production" pipeline runs integration tests *after* the creation of the Docker image - -Here is how the staging pipeline looks: - -{% include image.html -lightbox="true" -file="/images/guides/promotion/staging-pipeline.png" -url="/images/guides/promotion/staging-pipeline.png" -alt="A pipeline only for staging deployments" -caption="A pipeline only for staging deployments" -max-width="80%" -%} - -This pipeline uses [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/#inserting-parallel-steps-in-a-sequential-pipeline) to run linting and security scanning at the same time. - -Here is the whole pipeline: - -`codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - "clone" - - "validate" - - "build" - - "staging" - -steps: - clone: - title: "Cloning repository" - type: "git-clone" - repo: "codefresh-contrib/helm-promotion-sample-app" - revision: '${{CF_REVISION}}' - stage: "clone" - prechecks: - type: parallel - stage: 'validate' - steps: - lint: - title: Lint - working_directory: "${{clone}}" - image: golangci/golangci-lint:v1.33.0 - commands: - - golangci-lint run -v . - securityAnalysis: - title: Security Scan - working_directory: "${{clone}}" - image: 'securego/gosec:v2.5.0' - commands: - - gosec ./... - build: - title: "Building Docker image" - type: "build" - image_name: "kostiscodefresh/helm-promotion-app" - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: "Dockerfile" - stage: "build" - registry: dockerhub - - deployStaging: - title: Deploying to Staging - type: helm - stage: staging - working_directory: ./helm-promotion-sample-app - arguments: - action: install - chart_name: ./chart/sample-app - release_name: example-staging - helm_version: 3.0.2 - kube_context: 'mydemoAkscluster@BizSpark Plus' - namespace: staging - custom_value_files: - - ./chart/values-staging.yaml -{% endraw %} -{% endhighlight %} - -The production pipeline assumes that the code has been scanned/validated already, and runs some integration tests as a final validation check before deploying the release to production: - -{% include image.html -lightbox="true" -file="/images/guides/promotion/production-pipeline.png" -url="/images/guides/promotion/production-pipeline.png" -alt="A pipeline only for production deployments" -caption="A pipeline only for production deployments" -max-width="80%" -%} - -This pipeline uses [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) to run [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). 
-
-Here is the whole pipeline:
-
-`codefresh.yml`
-{% highlight yaml %}
-{% raw %}
-version: "1.0"
-stages:
-  - "clone"
-  - "build"
-  - "testing"
-  - "prod"
-
-steps:
-  clone:
-    title: "Cloning repository"
-    type: "git-clone"
-    repo: "codefresh-contrib/helm-promotion-sample-app"
-    revision: '${{CF_REVISION}}'
-    stage: "clone"
-  build_app_image:
-    title: "Building Docker image"
-    type: "build"
-    image_name: "kostiscodefresh/helm-promotion-app"
-    working_directory: "${{clone}}"
-    tags:
-    - "latest"
-    - '${{CF_SHORT_REVISION}}'
-    dockerfile: "Dockerfile"
-    stage: "build"
-    registry: dockerhub
-  myTests:
-    title: Integration Tests
-    type: freestyle
-    working_directory: "${{clone}}"
-    stage: "testing"
-    arguments:
-      image: 'byrnedo/alpine-curl'
-      commands:
-        - "curl http://app:8080/health"
-      services:
-        composition:
-          app:
-            image: '${{build_app_image}}'
-            ports:
-              - 8080
-  deployProd:
-    title: Deploying to Production
-    type: helm
-    stage: prod
-    working_directory: ./helm-promotion-sample-app
-    arguments:
-      action: install
-      chart_name: ./chart/sample-app
-      release_name: example-prod
-      helm_version: 3.0.2
-      kube_context: 'mydemoAkscluster@BizSpark Plus'
-      namespace: production
-      custom_value_files:
-      - ./chart/values-prod.yaml
-{% endraw %}
-{% endhighlight %}
-
-Now that you have created the pipelines, you have several options on how to trigger them.
-Some common workflows are:
-
-1. Automate the staging pipeline when a commit lands in `master`, and only launch the production pipeline manually.
-1. Automate the staging pipeline when a commit lands in `master`, and use an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to call the production pipeline as a [child pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/call-child-pipelines/).
-1. Set the [trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) of the production pipeline to [launch only]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#restricting-which-branches-to-build) on `master`, and the trigger of the staging pipeline to launch only for non-`master` branches.
-1. Set the production pipeline to launch only for commits on `master`, and the staging pipeline only for pull requests (PRs).
-
-The exact mechanism depends on the workflow of your team. For more information, see [the guide on branches and pull requests]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/), especially [trunk-based development]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#trunk-based-development), which is a good starting point.
-
-## Promoting releases between environments
-
-If you have a large number of environments, we also suggest looking at the Helm promotion board provided by Codefresh.
-For more details, see [Helm promotion board]({{site.baseurl}}/docs/new-helm/helm-environment-promotion/).
- - -{% include -image.html -lightbox="true" -file="/images/guides/environments/board.png" -url="/images/guides/environments/board.png" -alt="Helm Promotion Dashboard" -caption="Helm Promotion Dashboard" -max-width="80%" -%} - - -## Related articles -[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) -[Pull requests and branches]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/) -[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) - - - - - diff --git a/_docs/ci-cd-guides/gitops-deployments.md b/_docs/ci-cd-guides/gitops-deployments.md index 14f5ad199..614f8b3f9 100644 --- a/_docs/ci-cd-guides/gitops-deployments.md +++ b/_docs/ci-cd-guides/gitops-deployments.md @@ -680,8 +680,9 @@ and load them from the repository. Once the pipeline is in Git, you should switch the online editor to [load the pipeline from the repository]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#loading-codefreshyml-from-version-control) instead of the inline text. -## Related articles -[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) -[ArgoCD integration]({{site.baseurl}}/docs/integrations/argocd/) -[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) -[Helm promotions]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) +## What to read next + +* [Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) +* [ArgoCD integration]({{site.baseurl}}/docs/integrations/argocd/) +* [Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +* [Helm promotions]({{site.baseurl}}/docs/new-helm/helm-environment-promotion/) diff --git a/_docs/ci-cd-guides/helm-best-practices.md b/_docs/ci-cd-guides/helm-best-practices.md deleted file mode 100644 index 317c4ad54..000000000 --- a/_docs/ci-cd-guides/helm-best-practices.md +++ /dev/null @@ -1,366 +0,0 @@ ---- -title: "Helm best practices" -description: "High-level overview of Helm workflows" -group: ci-cd-guides -redirect_from: - - /docs/new-helm/best-practices/ -toc: true ---- - -[Helm](https://helm.sh){:target="\_blank"} is a package manager for Kubernetes (think `apt` or `yum`). It works by combining several manifests into a single package called [a chart](https://helm.sh/docs/developing_charts/){:target="\_blank"}. -Helm also supports storing charts in remote or local Helm repositories that function as package registries, such as Maven Central, Ruby Gems, NPM registry, etc. - -Helm is currently the only solution that supports: - -* Grouping related Kubernetes manifests in a single entity (the chart) -* Basic templating and values for Kubernetes manifests -* Dependency declaration between applications (chart of charts) -* A registry of available applications to be deployed (Helm repository) -* A view of a Kubernetes cluster at the application/chart level -* Managing of chart installation/upgrades as a whole -* Built-in rollback of a chart to a previous version without running a CI/CD pipeline again - -You can find a list of public curated charts in the default [Helm repository](https://github.com/helm/charts/tree/master/stable){:target="\_blank"}. - -Several third-party tools support Helm chart creation such as [Draft](https://draft.sh/){:target="\_blank"}. 
Local Helm development
-is also supported by [garden.io](https://docs.garden.io/using-garden/using-helm-charts){:target="\_blank"} and [skaffold](https://skaffold.dev/docs/how-tos/deployers/#deploying-with-helm){:target="\_blank"}. Check your favorite tool for native Helm support.
-
-Codefresh also has built-in support for Helm:
-* [Packages]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/)
-* [Deployments]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/)
-* [Repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/)
-* [Environments]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/)
-
-## Helm concepts
-
-The [official docs](https://helm.sh/docs/using_helm/){:target="\_blank"} do a good job of explaining the basic concepts.
-The table below focuses on some important points.
-
-Helm Concept|Description|Important point
----|---|---
-Chart (unpackaged) | A folder with files that follow the Helm chart guidelines. | Can be deployed directly to a cluster
-Chart (packaged) | A `tar.gz` archive of the above. | Can be deployed directly to a cluster
-Chart name | Name of the package as defined in `Chart.yaml`. | Part of package identification
-Templates | A set of Kubernetes manifests that form an application. | `Go` templates can be used
-Values | Settings that can be parameterized in Kubernetes manifests. | Used for templating of manifests
-Chart version | The version of the package/chart. | Part of package identification
-App version | The version of the application contained in the chart. | **Independent from chart version**
-Release | A deployed package in a Kubernetes cluster. | **Multiple releases of the same chart can be active**
-Release name | An arbitrary name given to the release. | **Independent from the chart name**
-Release revision | A number that gets incremented each time an application is deployed/upgraded. | **Unrelated to chart version**
-Repository | A file structure (HTTP server) with packages and an `index.yaml` file. | Helm charts can be deployed **without** being first fetched from a repository
-Installing | Creating a brand-new release from a Helm chart (either unpackaged, packaged, or from a repo). |
-Upgrading | Changing an existing release in a cluster. | Can be upgraded to any version (even the same)
-Rolling back | Going back to a previous revision of a release. | Helm handles the rollback, no need to rerun the pipeline
-Pushing | Storing a Helm package in a repository. | Chart will be automatically packaged
-Fetching | Downloading a Helm package from a repository to the local filesystem. |
-
-## Common Helm misconceptions
-
-Any new technology requires training on how to use it effectively. If you have already worked with any type of package manager, you should be familiar with how Helm works.
-
-Here is a list of important Helm points that are often a source of contention between teams.
-
-### Helm repositories are optional
-
-Using Helm repositories is a recommended practice, but completely optional. You can deploy a Helm chart to a Kubernetes cluster directly from the filesystem. The [quick start guide]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/) describes this scenario.
-
-Helm can install a chart, either packaged (`.tgz`) or unpackaged (a tree of files), to a Kubernetes cluster right away. Thus, the most minimal Helm pipeline has only two steps:
-
-1. Check out from Git a Helm chart described in uncompressed files.
-1. 
Install this chart to a Kubernetes cluster.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/helm-direct-deployment.png"
-url="/images/guides/helm-best-practices/helm-direct-deployment.png"
-alt="Simplest Helm pipeline"
-caption="Simplest Helm pipeline"
-max-width="70%"
-%}
-
-In the next section you will see more efficient workflows, but the fact remains that Helm repositories are optional. There is **no** technical requirement that a Helm chart must be uploaded to a Helm repository before being deployed to a cluster.
-
-### Chart versions and appVersions
-
-Each Helm chart can define two separate versions:
-
-1. The version of the chart itself (`version` field in `Chart.yaml`).
-1. The version of the application contained in the chart (`appVersion` field in `Chart.yaml`).
-
-These are unrelated and can be bumped up in any manner that you see fit. You can sync them together or have them increase independently. There is no right or wrong practice here as long as you stick to one. We will see some versioning strategies in the next section.
-
-### Charts and sub-charts
-
-The most basic way to use Helm is by having a single chart that holds a single application. The single chart will contain all the resources needed by your application, such as deployments, services, config-maps, etc.
-
-However, you can also create a chart that depends on other, completely external charts (a.k.a. an umbrella chart), declared in the `requirements.yaml` file. Using this strategy is optional and can work well for some organizations. Again, there is no definitive answer on right and wrong here; it depends on your team's process.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/chart-structure.png"
-url="/images/guides/helm-best-practices/chart-structure.png"
-alt="Possible Helm structures"
-caption="Possible Helm structures"
-max-width="70%"
-%}
-
-In the next sections we will see some scenarios where you would want to use umbrella charts.
-
-
-### Helm vs K8s templates
-
-Helm is a package manager that also happens to include templating capabilities. Unfortunately, a lot of people focus only on using Helm as a template manager and nothing else.
-
-Technically, Helm can be used only as a templating engine by stopping the deployment process at the manifest level. It is perfectly possible to use Helm only to [create plain Kubernetes manifests](https://helm.sh/docs/helm/#helm-template){:target="\_blank"} and then install them on the cluster using the standard methods (such as `kubectl`). But then you miss all the advantages of Helm (especially the application registry aspect).
-
-At the time of writing, Helm is the only package manager for Kubernetes, so if you want a way to group your manifests and a registry of your running applications, there is no off-the-shelf alternative apart from Helm.
-
-Here is a table that highlights the comparison:
-
-Helm Feature|Alternative
----|---
-Templating | Kustomize, k8comp, kdeploy, ktmpl, kuku, jinja, sed, awk, etc.
-Manifest grouping (entity/package) | None
-Application/package dependencies | None
-Runtime view of cluster packages | None
-Registry of applications | None
-Direct rollbacks and upgrades | None
-
-
-
-
-## Helm pipelines
-
-With the basics out of the way, we can now see some typical Helm usage patterns. Depending on the size of your company and your level of involvement with Helm, you need to decide which practice is best for you.
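-
-All the patterns below share the same basic building block: a pipeline step that runs Helm against your cluster. As a rough sketch, a minimal Codefresh Helm step looks like the following; the chart path, release name, and cluster context are placeholders that you would adapt to your own setup (see [Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) for the full syntax):
-
-{% highlight yaml %}
-{% raw %}
-  deploy_chart:
-    title: Deploying Helm chart
-    type: helm
-    arguments:
-      action: install
-      chart_name: ./my-chart
-      release_name: my-release
-      helm_version: 3.0.2
-      kube_context: my-cluster
-      namespace: default
-{% endraw %}
-{% endhighlight %}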
-
-
-### Deploy from an unpackaged chart
-
-This is the simplest pipeline for Helm. The Helm chart is in the same Git repository as the source code of the application.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/helm-no-repo.png"
-url="/images/guides/helm-best-practices/helm-no-repo.png"
-alt="Using Helm without a Helm repository"
-caption="Using Helm without a Helm repository"
-max-width="70%"
-%}
-
-The steps are the following:
-
-1. Code/Dockerfile/Chart is checked out from Git
-1. Docker image is built (and pushed to the [default Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/#the-default-registry))
-1. Chart is [deployed directly]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#example-installing-a-chart) to a Kubernetes cluster
-
-Notice that in this pipeline there is no Helm repository involved.
-
-> We recommend this workflow only while you are learning Helm. Storing your Helm charts in a Helm repository is a better practice, as described in the next section.
-
-### Package/push and then deploy
-
-This is the recommended approach when using Helm. First, you package and push the Helm chart into a repository, and then you deploy it to your cluster.
-This way, your Helm repository acts as a registry of the applications that run on your cluster. You can also reuse the charts to deploy to other environments (described later on this page).
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/basic-helm-pipeline.png"
-url="/images/guides/helm-best-practices/basic-helm-pipeline.png"
-alt="Basic Helm application pipeline"
-caption="Basic Helm application pipeline"
-max-width="70%"
-%}
-
-The Helm chart can be either in the same Git repository as the source code (as shown above) or in a different one.
-Note that this workflow assumes that you [have attached a Helm repository]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#step-4-optional---import-the-helm-configuration-in-your-pipeline-definition) configuration to the pipeline.
-
-If you use the [Codefresh Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/), you can see all your releases in the Codefresh UI.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/helm-catalog.png"
-url="/images/guides/helm-best-practices/helm-catalog.png"
-alt="Helm application catalog"
-caption="Helm application catalog"
-max-width="70%"
-%}
-
-
-This approach also allows you to reuse Helm charts. After you publish a Helm chart to the Helm repository, you can deploy it to another environment (with a pipeline or manually) using different values.
-
-
-### Separate Helm pipelines
-
-Even though packaging and deploying a release in a single pipeline is the recommended approach, some companies have two different processes for packaging and releasing.
-
-In this case, you can create two pipelines: one that packages the Helm chart and uploads it to a Helm repository, and another that deploys it from the Helm repository to a cluster.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/push-and-deploy.png"
-url="/images/guides/helm-best-practices/push-and-deploy.png"
-alt="Push and deploy in different pipelines"
-caption="Push and deploy in different pipelines"
-max-width="70%"
-%}
-
-While this approach offers flexible releases (as you can choose exactly what is released and what is not), it also increases the complexity of deployments. 
You need to pass parameters to the deployment pipeline to decide which chart version will be deployed.
-
-In Codefresh, you can also have the two pipelines automatically [linked together]({{site.baseurl}}/docs/integrations/codefresh-api/#using-codefresh-from-within-codefresh).
-
-### Using Helm rollbacks
-
-Helm has the native capability of [rolling back](https://helm.sh/docs/helm/#helm-rollback){:target="\_blank"} a *release* to any previous *revision*. This can be done
-manually or via the [Codefresh UI]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/#helm-releases-overview).
-
-A more advanced usage is to automatically roll back a release if it "fails".
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/helm-rollback.png"
-url="/images/guides/helm-best-practices/helm-rollback.png"
-alt="Automatic Helm rollback"
-caption="Automatic Helm rollback"
-max-width="70%"
-%}
-
-In the example pipeline above, after deployment, we run some smoke tests/health checks. If they fail,
-then the rollback step is executed using [pipeline conditionals]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/).
-
-Alternatively, you can run any other [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) after a deployment, such as health checks, metric collection, or load testing, that decides whether a Helm rollback is needed or not.
-
-Automatic Helm rollbacks can be integrated into all the Helm workflows described in this section.
-
-
-## Helm packaging strategies
-
-As mentioned before, a Helm chart version is completely different from the application version it contains. This means that you can track versions of the Helm chart itself separately from the applications it defines.
-
-### Simple 1-1 versioning
-
-This is the most basic versioning approach, and it is the suggested one if you are starting out with Helm.
-Don't use the `appVersion` field at all (it is optional anyway) and just keep the chart version in sync with your actual application.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/chart-version-single.png"
-url="/images/guides/helm-best-practices/chart-version-single.png"
-alt="Synced versions in Helm"
-caption="Synced versions in Helm"
-max-width="60%"
-%}
-
-This approach makes version bumping very easy (you bump everything up) and also allows you to quickly track
-what application version is deployed on your cluster (same as the chart version).
-
-The downside of this approach is that you can't track chart changes separately.
-
-### Chart versus application versioning
-
-This is an advanced approach, which you should adopt if changes happen in the charts themselves all the time (i.e., in the templates) and you want to track them separately from the application.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/chart-version-multiple.png"
-url="/images/guides/helm-best-practices/chart-version-multiple.png"
-alt="Independent Helm versioning"
-caption="Independent Helm versioning"
-max-width="90%"
-%}
-
-An important point here is that you need to adopt a policy in your team on what a "chart change" means. Helm does not enforce chart version changes. You can deploy a different chart with the same version as the previous one. So, if this is something that you want to do, you need to make sure that all teams are on the same page for versioning practices.
-
-On the plus side, this workflow allows you to version charts and applications individually, and is very flexible for companies where separate teams manage the charts and the application source code.
-
-
-### Umbrella charts
-
-Umbrella charts are charts of charts. They add an extra layer of complexity on top of both previous approaches.
-You can follow the same paradigms in umbrella charts. Either the parent chart has the same version as everything else (first approach) or it has its own version.
-
-In the second case, you need to agree with your team on when exactly the parent chart version should be bumped. Is it only when a child chart changes? Only when an application changes? Or both?
-
-The answer does not really matter as long as your team follows the same rules.
-
-## Helm promotion strategies
-
-A Helm chart (like a Docker image) should be promoted between environments. It should start with testing and staging environments and gradually move to production ones.
-
-### Single repository with multiple environments
-
-This is the most basic deployment workflow. You have a single Helm chart (which is exactly the same across all environments).
-It is deployed to multiple targets using a different set of values.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/multiple-environments.png"
-url="/images/guides/helm-best-practices/multiple-environments.png"
-alt="Deploy to multiple environments with Helm"
-caption="Deploy to multiple environments with Helm"
-max-width="90%"
-%}
-
-Codefresh has several ways to override the values for each environment within a [pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#helm-values).
-
-
-### Chart promotion between environments
-
-This is the recommended deployment workflow. Codefresh can store different Helm values per environment in the [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/#using-shared-helm-values) mechanism.
-Then you can view and manage releases from the [Helm environments dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/).
-
-{% include
-image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/board.png"
-url="/images/guides/helm-best-practices/board.png"
-alt="Helm Environment Dashboard"
-caption="Helm Environment Dashboard"
-max-width="80%"
-%}
-
-Then, once you promote a Helm release, either from the GUI or from a pipeline, you can select exactly which set of configuration parameters to use:
-
-{% include
-image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/value-options.png"
-url="/images/guides/helm-best-practices/value-options.png"
-alt="Changing deployment values"
-caption="Changing deployment values"
-max-width="40%"
-%}
-
-This workflow has two big advantages:
-
-1. You get a visual overview of each Helm release and the environment where it is installed
-1. You can promote releases without running the initial CI/CD pipeline (that created the chart)
-
-### Chart promotion between repositories and environments
-
-A more advanced workflow (useful in organizations with multi-location deployments) is the promotion of Helm releases between both [repositories]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) and environments.
-
-{% include
-image.html
-lightbox="true"
-file="/images/guides/helm-best-practices/advanced-promote.png"
-url="/images/guides/helm-best-practices/advanced-promote.png"
-alt="Advanced Helm promotion"
-caption="Advanced Helm promotion"
-max-width="90%"
-%}
-
-There are different pipelines for:
-
-1. Creating the Helm chart and storing it in a staging Helm repository (e.g., the Codefresh Helm repository)
-1. Deploying the Helm chart to a staging environment. After it is tested, *the chart* is promoted to one or more "production" Helm repositories
-1. Deploying the promoted Helm chart to one of the production environments
-
-While this workflow is very flexible, it adds complexity because of the number of Helm charts available (since they exist in multiple Helm repositories). You also need to set up the parameters between the different pipelines so that the Helm charts to be deployed can indeed be found in the expected Helm repository.
-
-## Related articles
-[Helm quick start guide]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/)
-[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/)
-[Helm Dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management)
-[Helm Promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion)
diff --git a/_docs/ci-cd-guides/kubernetes-templating.md b/_docs/ci-cd-guides/kubernetes-templating.md
deleted file mode 100644
index 510582f67..000000000
--- a/_docs/ci-cd-guides/kubernetes-templating.md
+++ /dev/null
@@ -1,214 +0,0 @@
----
-title: "Simple Kubernetes templating"
-description: "Use templates in your Kubernetes manifests"
-group: ci-cd-guides
-toc: true
----
-
-Once you start working with Kubernetes, you will see the need for using templates in Kubernetes manifests for common parameters such as:
-
-* The Docker image name of a deployment
-* The Docker image tag of a deployment
-* Number of replicas
-* Service labels
-* Configmaps and other settings
-
-Kubernetes does not provide any templating mechanism on its own. Deployed manifests are expected to be static YAML files. An external solution is needed if you want to pass parameters in your manifests.
-
-The proper way to handle templates is with [Helm]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/). Helm is the package manager for Kubernetes and also includes templating capabilities.
-
-To use templates without Helm, there are several templating solutions available, including [Kustomize](https://github.com/kubernetes-sigs/kustomize){:target="\_blank"} from Google.
-
-Codefresh also includes its own simple templating mechanism that has built-in integration with all [pipeline variables]({{site.baseurl}}/docs/pipelines/variables/), as we explain in this page.
-
-## Using the Codefresh deploy image
-
-Codefresh offers a public Docker image at [https://hub.docker.com/r/codefresh/cf-deploy-kubernetes/tags/](https://hub.docker.com/r/codefresh/cf-deploy-kubernetes/tags/){:target="\_blank"} for easy templating of Kubernetes manifests. The source code of the image is at [https://github.com/codefresh-io/cf-deploy-kubernetes](https://github.com/codefresh-io/cf-deploy-kubernetes){:target="\_blank"}. 
This image can be used in a freestyle step like this:
-
-`YAML`
-{% highlight yaml %}
-{% raw %}
-  MyDeploy:
-    title: K8s Deploy
-    image: codefresh/cf-deploy-kubernetes:master
-    commands:
-      - /cf-deploy-kubernetes deployment.yml
-    environment:
-      - KUBECONTEXT=my-cluster-name
-      - KUBERNETES_NAMESPACE=my-namespace
-{% endraw %}
-{% endhighlight %}
-
-The step accepts the following environment variables:
-
-* `KUBECONTEXT`: Corresponds to the name of a cluster added to Codefresh.
-* `KUBERNETES_NAMESPACE`: The namespace to which to deploy.
-* `KUBECTL_ACTION`: An action for `kubectl`. Valid values are `apply|create|replace` (default is `apply`).
-* `KUBERNETES_DEPLOYMENT_TIMEOUT`: The duration to wait for a successful deployment before failing the build (defaults to 120 secs).
-
-The step applies your manifest to the cluster specified by the given context, in the given namespace. The name of the context is the name of your cluster as seen in the [Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services).
-
-Before the deployment takes place, all Codefresh variables found in the `deployment.yml` file in the form of {% raw %}`{{MY_VARIABLE}}`{% endraw %} are automatically replaced with their current values.
-
-Here is an example manifest:
-
-`Kubernetes manifest`
-{% highlight yaml %}
-{% raw %}
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: my-demo-app
-  annotations:
-    branch: {{CF_BRANCH_TAG_NORMALIZED}}
-    source-repository: {{CF_REPO_NAME}}
-spec:
-  replicas: 4
-  template:
-    metadata:
-      labels:
-        name: my-demo-app
-        app: my-demo-app
-    spec:
-      containers:
-      - name: my-demo-app
-        image: r.cfcr.io/{{CF_ACCOUNT}}/my-sample-application:{{CF_SHORT_REVISION}}
-        imagePullPolicy: Always
-        ports:
-        - name: http
-          containerPort: 8080
-          protocol: TCP
-{% endraw %}
-{% endhighlight %}
-
-In this case, the image reference gets your Codefresh account name, and the tag uses the Git revision. Metadata annotations are also defined with values from the branch name and the Git repository name.
-
-Notice that the variables are declared in the {% raw %}`{{MY_VARIABLE}}`{% endraw %} form and **NOT** {% raw %}`${{MY_VARIABLE}}`{% endraw %}, which is how they are used inside the [Codefresh yaml]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) definition.
-
-
-## Creating custom manifest replacements
-
-Apart from the built-in [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/), you can also create any variable of your own using the same replacement syntax.
-
-Here is an example manifest:
-
-`Kubernetes manifest`
-{% highlight yaml %}
-{% raw %}
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: my-demo-app
-  annotations:
-    source-repository: {{CF_REPO_NAME}}
-    branch: {{CF_BRANCH_TAG_NORMALIZED}}
-    custom-label: {{MY_CUSTOM_LABEL}}
-spec:
-  replicas: {{MY_REPLICA_NUMBER}}
-  template:
-    metadata:
-      labels:
-        name: my-demo-app
-        app: my-demo-app
-    spec:
-      containers:
-      - name: my-demo-app
-        image: r.cfcr.io/{{CF_ACCOUNT}}/my-sample-application:{{CF_SHORT_REVISION}}
-        imagePullPolicy: Always
-        ports:
-        - name: http
-          containerPort: 8080
-          protocol: TCP
-      imagePullSecrets:
-      - name: {{PULL_SECRET}}
-{% endraw %}
-{% endhighlight %}
-
-Here you can see custom variables for an annotation, the replica number, and the pull secret (in addition to the standard variables).
-You can provide the values for your custom variables as environment parameters in the freestyle step.
-
-`codefresh.yaml`
-{% highlight yaml %}
-{% raw %}
-version: '1.0'
-steps:
-  BuildingDockerImage:
-    title: Building Docker Image
-    type: build
-    image_name: my-sample-application
-    tag: '${{CF_SHORT_REVISION}}'
-    dockerfile: Dockerfile
-  MyDeploy:
-    title: K8s Deploy
-    image: codefresh/cf-deploy-kubernetes:master
-    commands:
-      - /cf-deploy-kubernetes deployment.yml
-    environment:
-      - KUBECONTEXT=k8s-demo@Google
-      - KUBERNETES_NAMESPACE=my-namespace
-      - MY_CUSTOM_LABEL=build-id-${{CF_BUILD_ID}}
-      - MY_REPLICA_NUMBER=3
-      - PULL_SECRET=codefresh-generated-r.cfcr.io-cfcr-my-namespace
-{% endraw %}
-{% endhighlight %}
-
-In the environment section you can see the values for the custom variables. We set the replica number to 3, a full string for the pull secret, and a concatenated string for the annotation.
-
-## Using replacements in multiple manifests
-
-By default, the deploy step does replacements in a single manifest only. If you have multiple Kubernetes manifests, you can merge all of them into a single file, or run the deploy command multiple times, like this:
-
-`codefresh.yml`
-{% highlight yaml %}
-{% raw %}
-  MyDeploy:
-    title: K8s Deploy
-    image: codefresh/cf-deploy-kubernetes:master
-    commands:
-      - /cf-deploy-kubernetes deployment.yml
-      - /cf-deploy-kubernetes service.yml
-      - /cf-deploy-kubernetes config-map.yml
-    environment:
-      - KUBECONTEXT=my-cluster-name
-      - KUBERNETES_NAMESPACE=my-namespace
-      - MY_REPLICA_NUMBER=3
-      - KUBERNETES_DEPLOYMENT_TIMEOUT=360
-{% endraw %}
-{% endhighlight %}
-
-Variable replacements will happen in all manifests before they are deployed.
-
-
-## Using Unix command line tools for templates
-
-It is also perfectly possible to use any Unix templating or text editing tool, such as `sed` or `awk`, to perform text replacements in Kubernetes manifests.
-
-As a very simple example, you could perform a replacement with the following [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) in your Codefresh pipeline.
-
-`YAML`
-{% highlight yaml %}
-{% raw %}
-version: '1.0'
-steps:
-  my_replacement:
-    image: alpine
-    commands:
-      # replace every ${TAG} with current TAG variable value
-      - sed -i 's/${TAG}/${{TAG}}/g' my-k8s-deployment.yaml
-{% endraw %}
-{% endhighlight %}
-
-## Related articles
-[Connecting to your cluster]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/)
-[Managing your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/)
-[Accessing a Docker registry]({{site.baseurl}}/docs/deployments/access-docker-registry-from-kubernetes/)
-[Running custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/)
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/_docs/ci-cd-guides/microservices.md b/_docs/ci-cd-guides/microservices.md
deleted file mode 100644
index 03b243f76..000000000
--- a/_docs/ci-cd-guides/microservices.md
+++ /dev/null
@@ -1,237 +0,0 @@
----
-title: "Building microservices"
-description: "Create pipelines specifically for microservice applications"
-group: ci-cd-guides
-toc: true
----
-
-Now that you know how to [build your app]({{site.baseurl}}/docs/ci-cd-guides/packaging-compilation/) and [create Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/), we can see how Codefresh works with microservice applications.
-
-## Organizing pipelines for monolithic applications
-
-In the past, pipelines for monolithic applications tended to share the same characteristics as the application they were building. 
Each project had a single pipeline which was fairly complex, and different projects had completely different pipelines. Each pipeline was almost always connected to a single Git repository.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/microservices/monolithic-pipelines.png"
-url="/images/guides/microservices/monolithic-pipelines.png"
-alt="Monolithic pipelines"
-caption="Monolithic pipelines"
-max-width="80%"
-%}
-
-The complexity of each pipeline was detrimental to easy maintenance. Pipelines were typically controlled by a small team of gurus, familiar with both the internals of the application and the deployment environment.
-
-For each software project, operators handled the pipeline structure, while developers only worked with the source code (going against the DevOps paradigm where all teams should share responsibility for common infrastructure and collaborate on shared problems).
-
-Pipeline size and complexity was often a huge pain point. Even though several tools existed for the continuous integration part of a monolithic application, continuous deployment was a completely different matter, which forced a lot of companies to create their own custom in-house scripts to take care of deployment.
-
-## Scalability issues with microservice pipelines
-
-Microservices of course have several advantages regarding deployment and development, but they also come with their own challenges. Managing microservice repositories and pipelines becomes much harder as the number of applications grows.
-
-While a company might have to deal with 1–5 pipelines in the case of monolith applications (assuming 1–5 projects), the number quickly jumps to 25 if each monolith is divided into 5 microservices.
-
-These numbers differ per organization. It is perfectly normal for an application to have 10 microservices. So at a big organization that has 50 applications, the operator team is suddenly tasked with the management of 500+ pipelines.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/microservices/moving-to-microservices.png"
-url="/images/guides/microservices/moving-to-microservices.png"
-alt="Number of pipelines is exploding"
-caption="Number of pipelines is exploding"
-max-width="80%"
-%}
-
-This sudden explosion in numbers makes it impossible to keep working with pipelines manually. Several CI solutions do not have the capacity to work with such a high number of pipelines.
-
-**Here is where we reach the biggest pitfall regarding pipeline management in the era of microservices**. Several companies tried to solve the problem of microservice pipelines using shared pipeline segments.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/microservices/shared-pipelines.png"
-url="/images/guides/microservices/shared-pipelines.png"
-alt="Shared libraries add extra complexity"
-caption="Shared libraries add extra complexity"
-max-width="80%"
-%}
-
-In theory, this sounds like a good idea:
-
-1. Operators locate the common parts of pipelines across applications
-1. A shared pipeline segment registry is created to hold all those common parts
-1. Pipelines in existing projects are re-engineered to depend on the common segments
-1. New projects must first examine the library of common pipeline segments and choose what is already there
-
-The final result is that a single pipeline is actually composed of two types of steps: those common to other pipelines, and those that are specific to that project only.
-
-This has led to the development of several solutions which attempt to centralize common pipeline parts and reuse them in the form of “libraries” within software projects. The issue here is that this approach requires a very large time investment, as well as a disciplined team that can communicate and cooperate on the following factors:
-
-1. Detecting which pipeline segments are indeed common
-1. Keeping the library of common pipeline segments up-to-date
-1. Disallowing copy-pasting of pipelines
-1. Developing brand new pipelines when needed
-1. Initial setup and pipeline bootstrap for each new project created
-
-Unfortunately, in practice, as the number of microservice applications grows, teams find it very hard to keep all these principles in mind when creating new projects.
-
-## Reusing pipelines for microservice applications
-
-Codefresh is the first CI/CD solution for microservices and containers. Because we are not burdened with any legacy decisions, we are free to define a new model for Codefresh pipelines, which is focused on microservices.
-
-The basic idea is that all microservices of a single application almost always have the same lifecycle. They are compiled, packaged, and deployed in a similar manner. Once this realization is in place, we can see that instead of having multiple pipelines for each microservice, where each one is tied to a Git repository, we can have a single pipeline shared by all microservices.
-
-{% include image.html
-lightbox="true"
-file="/images/guides/microservices/microservice-pipelines.png"
-url="/images/guides/microservices/microservice-pipelines.png"
-alt="Keeping a single pipeline for all microservices"
-caption="Keeping a single pipeline for all microservices"
-max-width="80%"
-%}
-
-The impact of this design cannot be overstated. First of all, it should be clear that there is no need for sharing pipeline segments anymore. The whole pipeline is essentially the reusable unit.
-
-This makes pipeline construction very simple.
-
-The biggest advantage, however, is the way new projects are created. When a new microservice is added to an application, the pipeline is already there, and only a new trigger is added for that microservice. Notice that the pipeline is no longer connected to any specific Git repository. All information for a repository comes from the Git trigger that started the pipeline.
-
-As an operator, you can bootstrap a new project by quickly adding a new trigger to an existing pipeline:
-
-{% include image.html
-lightbox="true"
-file="/images/guides/microservices/single-pipeline.png"
-url="/images/guides/microservices/single-pipeline.png"
-alt="One pipeline with multiple microservices"
-caption="One pipeline with multiple microservices"
-max-width="80%"
-%}
-
-This is the fastest possible way to bootstrap a new project. As the number of microservices grows, the only thing that grows is the list of triggers. All pipelines are exactly the same.
-
-## Creating reusable pipelines
-
-When working with microservices you need to remember that:
-
-1. In Codefresh a pipeline can stand on its own. It is **not** connected by default to any Git repository.
-1. You can write Codefresh pipelines in a generic manner so that they can work with multiple applications.
-1. If you connect multiple triggers to a single pipeline, all microservices will share that pipeline.
-1. You can create multiple pipelines for each project if you have microservices with slightly different architecture.
- -To create a reusable pipeline use the [generic form of the clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/): - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -steps: - clone: - title: "Cloning repository" - type: "git-clone" - repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' - revision: '${{CF_REVISION}}' - compile: - title: "Create JAR" - type: "freestyle" - image: 'maven:3.5.2-jdk-8-alpine' - working_directory: "${{clone}}" - commands: - - 'mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package' -{% endraw %} -{% endhighlight %} - -This pipeline uses variables in the clone step. These variables are automatically populated by the [respective trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/). So you can connect this pipeline to any number of Java repositories and it will work on all of them (assuming they use Maven). - -{% include image.html -lightbox="true" -file="/images/guides/microservices/multiple-triggers.png" -url="/images/guides/microservices/multiple-triggers.png" -alt="Connecting multiple triggers to a single pipeline" -caption="Connecting multiple triggers to a single pipeline" -max-width="100%" -%} - -Any time you run the pipeline you can select which trigger/branch you will use. So in the first case the values will be like this: - -* `CF_REPO_OWNER=kostis-codefresh` -* `CF_REPO_NAME=spring-petclinic` -* `CF_REVISION=another-branch` - -In the second case the values will be replaced like this: - -* `CF_REPO_OWNER=codefresh-contrib` -* `CF_REPO_NAME=spring-boot-2-sample-app` -* `CF_REVISION=master` - -You can follow the same pattern for any other kind of application (NodeJS, Python, Ruby etc.) - - -## Adding a new microservice to an existing application - -As an example, let's say that you have an application with five microservices. Two of them use Java and three use NodeJs. You can easily create two pipelines for the whole application, one for each programming language. - -However, if you take advantage of [multistage Docker builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/#production-ready-docker-images-with-multi-stage-builds), you could even have a single pipeline for all five services: - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -steps: - clone: - title: "Cloning repository" - type: "git-clone" - repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' - revision: '${{CF_REVISION}}' - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: '${{CF_REPO_NAME}}' - working_directory: ./ - tag: 'multi-stage' - dockerfile: Dockerfile - deploy_to_k8s: - title: Deploy to cluster - type: deploy - kind: kubernetes - cluster: 'production-gke' - namespace: default - service: '${{CF_REPO_NAME}}' - candidate: - image: '${{build_app_image}}' -{% endraw %} -{% endhighlight %} - -This pipeline: - -1. Checks out source code from any connected trigger -1. Creates a Docker image (assumes a multistage Dockerfile) -1. Deploys the image to a Kubernetes cluster - - -Now, if you add another microservice to the application, you can simply add a new trigger making the addition as easy as possible: - -{% include image.html -lightbox="true" -file="/images/guides/microservices/add-new-microservice.png" -url="/images/guides/microservices/add-new-microservice.png" -alt="Connecting a new trigger for a new microservice" -caption="Connecting a new trigger for a new microservice" -max-width="80%" -%} - -This is just an example pipeline. 
You might have another generic pipeline for Helm deployments, FTP uploads, VM images and so on. - - -## Related articles -[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) -[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) -[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) -[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) - - - - - - - diff --git a/_docs/ci-cd-guides/packaging-compilation.md b/_docs/ci-cd-guides/packaging-compilation.md deleted file mode 100644 index 924cb13ce..000000000 --- a/_docs/ci-cd-guides/packaging-compilation.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: "Building your app" -description: "Compile and package traditional (non-Docker) artifacts" -group: ci-cd-guides -toc: true ---- - -When you use Codefresh for continuous integration (CI), one of the most basic tasks is compiling and packaging applications. Though Codefresh has native support for Docker artifacts, it still works great with traditional (non-Dockerized) applications that don't use a Dockerfile for the actual build. - ->If your application is deployed as a Docker image, see [building Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) instead. - -## Using supporting Docker images in CI/CD environment - -Unlike other CI solutions that you might be familiar with, Codefresh build nodes are very simple. They have only Docker installed and nothing else. - -When you run a Codefresh pipeline, you choose the Docker images to be used in the CI/CD environment. Once the pipeline runs, the Docker images are automatically launched by Codefresh, and you have access to all the tools the images contain. When the pipeline completes its run, all Docker images used for the pipeline are discarded, and the build machine reverts to its original state. - -Even if your application is not itself packaged as a Docker image, Codefresh pipelines are always "Docker-based" in the sense that Docker is used for the tools that take part in the pipeline. - -This approach has a lot of advantages: - - * No maintenance effort for build nodes, as they only have Docker and nothing else. - * You can use any tool in your pipeline that you want without actually installing it first. - * All public Docker images in Docker Hub are potential pipeline steps. - * You can use different versions of the same tool in the same pipeline. - * It is very easy to upgrade a tool to a new version (just change the tag of the Docker container used) - -Notice also that unlike some other CI solutions: - -1. You can use multiple Docker images in the same pipeline, even if they contain the same tool, with no version conflicts -1. As Docker images in Codefresh pipelines have no special requirements, you can use *any* private or public Docker image. - -All [pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) in Codefresh are in fact Docker images. - - -## Choosing programming tools as Docker images - -In practice, this means that if you have a Node application, you need to use a [Node image]({{site.baseurl}}/docs/example-catalog/ci-examples/nodejs) to package your application, a [Maven image]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/) if you are working with Java, a [Python]({{site.baseurl}}/docs/learn-by-example/python/) image for Python applications, and so on. - -You launch the image using the Codefresh freestyle step. 
Here is an example for Node:
-
-`codefresh.yml`
-{% highlight yaml %}
-version: '1.0'
-steps:
-  my_node_app:
-    title: Running unit tests
-    image: node:11
-    commands:
-      - npm install
-      - npm run test
-{% endhighlight %}
-
-This pipeline downloads the `node:11` image to the Codefresh build machine, launches it, and passes your source code to it. It then runs the commands `npm install` and `npm run test`. The result is that your source code can be packaged without actually installing Node.js on the build machine beforehand.
-
-You can mix and match different images in the same pipeline. Let's say for example that you have a single repository that contains a front-end in Node.js and a back-end in Java. You can easily create a pipeline that deals with both:
-
-`codefresh.yml`
-{% highlight yaml %}
-version: '1.0'
-steps:
-  my_node_app:
-    title: Packaging front end
-    image: node:11
-    working_directory: ./front-end
-    commands:
-      - npm install
-      - npm run test
-  my_jar_compilation:
-    title: Packaging back end
-    image: maven:3.5.2-jdk-8-alpine
-    working_directory: ./back-end
-    commands:
-      - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package
-{% endhighlight %}
-
-This pipeline compiles the Java code under the `back-end` folder, and the JavaScript web application found in the `front-end` folder. Both Docker images have access to the same workspace via [the shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps).
-
-To get up and running with Codefresh as quickly as possible, you can simply search Docker Hub for an existing image that uses the tool you need. Top-level Docker Hub images are curated by the Docker team and are considered safe. So most popular programming languages already have a Docker image that you can use in your pipeline.
-
-Of course, you can also [create your private Docker image or use any existing image]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/) from a private or public registry. In that case, you need to write the full name of the image used.
-If you use an image from GCR (Google Container Registry), or another private registry, you would specify it as in the example below.
-
-`codefresh.yml`
-{% highlight yaml %}
-version: '1.0'
-steps:
-  my_bazel_app:
-    title: Running a Bazel build
-    image: gcr.io/my-registry/bazel
-    commands:
-      - bazel build //:MyProject
-  my_e2e_tests:
-    title: Running Mocha Test
-    image: my-azure-registry.azurecr.io/kostis-codefresh/my-jasmine-runner:1.0.1
-    commands:
-      - jasmine init
-{% endhighlight %}
-
-In this pipeline, the Docker images have a full registry prefix, so they are pulled from the respective registry instead of Docker Hub.
-
-In this manner, you can run any tool in any Codefresh pipeline as long as it is offered in a Docker image. This means that Codefresh pipelines can work with any programming language and any tool that you can use on your workstation.
-
-Unlike other CI solutions, you don't need to wait for the Codefresh team to add "native support" for your favorite tool in a Codefresh pipeline. You can simply package it in a Docker image yourself and use it straight away.
-
-
-## Using multiple Docker images in a single pipeline
-
-Unlike other CI solutions, there is no limit on the number of Docker images that you can use in a single pipeline. 
Also, all Docker images included in the same pipeline have access to the same project workspace via the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). -This means that you have maximum flexibility in the tools you use in a single project. - -As an example, let's see a pipeline that uses four different images for a single project. - - -`codefresh.yml` -{% highlight yaml %} -version: '1.0' -steps: - clone: - title: Cloning main repository... - stage: prepare - type: git-clone - arguments: - repo: my-user/my-app - revision: master - git: github - package_my_code: - title: Compile application - image: 'maven:3.5.2-jdk-8-alpine' - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package - run_sonar: - title: Quality Analysis - image: sonarsource/sonar-scanner-cli - commands: - - sonar-scanner - environment: - - SONAR_HOST_URL=http://foo.acme:9000 - create_bucket: - title: Creating bucket in AWS - image: hashicorp/terraform:0.12.0 - commands: - - terraform init - - terraform apply -auto-approve - upload_file: - title: Uploading Jar file - image: mesosphere/aws-cli - commands: - - aws s3 cp ./target/app.jar s3://my-bucket/my-jar/app.jar -{% endhighlight %} - -This pipeline does the following: - -1. Checks out source code -1. Packages a Jar file (from the source code) -1. Runs Sonar analysis (taking into account both source code and compiled classes) -1. Creates a storage bucket in AWS (Amazon Web Services) -1. Uploads the packaged JAR to the bucket - -Notice how all Docker images use the same workspace without any extra configuration on your part. - -## Using different tool versions in the same pipeline - -A corollary of Docker-based pipelines is that you can use different versions of the same tool in the **same** pipeline. -As an example, here is a pipeline that runs both Python 2.x and Python 3.x, and it just works. - -`codefresh.yml` -{% highlight yaml %} -version: '1.0' -steps: - get_deps: - title: Getting dependencies - image: python:3.6-slim - commands: - - pip install -r requirements.txt - run_my_tests: - title: Running Unit Test - image: python:2 - commands: - - pip install pytest - - pytest -{% endhighlight %} - -You can easily choose the specific version that matches each of your projects. -
- -Here is another example where two different applications use Node.js 11 and Node.js 9 in the same pipeline. - -`codefresh.yml` -{% highlight yaml %} -version: '1.0' -stages: - - packaging - - deploying -steps: - PackageMyNode1App: - title: Packaging Node application 1 - stage: packaging - image: node:11.1 - working_directory: ./brand-new-project - commands: - - echo "My Node version is" - - node --version - - npm install - PackageMyNode2App: - title: Packaging Node application 2 - stage: packaging - image: node:9.3.0-slim - working_directory: ./legacy-project - commands: - - echo "My Node version is" - - node --version - - npm install -{% endhighlight %} - -> These versions are per pipeline. So each team can use the versions they need for their projects without affecting the other teams. - -So one team in your company might use Terraform 0.10 in their pipelines: - - -{% highlight yaml %} - PlanWithTerraform: - image: hashicorp/terraform:0.10.0 - title: Deploying Terraform plan - stage: deploy - commands: - - terraform plan -{% endhighlight %} - -Another team can use Terraform 0.12 just by changing the YAML of their `codefresh.yml`: - -{% highlight yaml %} - DeployWithTerraform: - image: hashicorp/terraform:0.12.0 - title: Deploying Terraform plan - stage: deploy - commands: - - terraform apply -auto-approve -{% endhighlight %} - - -To summarize, you can easily use any version of any programming tool in a Codefresh pipeline without the fear of breaking -another unrelated pipeline. - - -## Related articles -[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) -[Creating Codefresh pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) -[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) -[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) - - - - - - diff --git a/_docs/ci-cd-guides/preview-environments.md b/_docs/ci-cd-guides/preview-environments.md deleted file mode 100644 index 15e6ee2d4..000000000 --- a/_docs/ci-cd-guides/preview-environments.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -title: "Previewing dynamic environments" -description: "Deploy pull requests to cluster namespaces" -group: ci-cd-guides -toc: true ---- - - -In addition to deploying to [predefined environments]({{site.baseurl}}/docs/ci-cd-guides/environment-deployments/), for each pull request (PR), you may also need to deploy to dynamic environments, which are temporary, testing environments. For these types of environments, it is best to dynamically create an environment when a PR is created, and tear it down when the same PR is closed. - - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/dynamic-environments.png" -url="/images/guides/preview-environments/dynamic-environments.png" -alt="Dynamic Test environments" -caption="Dynamic Test environments" -max-width="90%" -%} - -Each developer works in isolation to test their features. This pattern contrasts with the traditional way of reusing static preexisting environments. - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/static-environments.png" -url="/images/guides/preview-environments/static-environments.png" -alt="Traditional static environments" -caption="Traditional static environments" -max-width="90%" -%} - -With Kubernetes you don't need to book and release specific test environments any more. Testing environments should -be handled in a transient way. 
- -## Preview environments with Kubernetes - -There are many options to create temporary environments with Kubernetes. - -* Namespaces for each PR - The simplest option is to use different namespaces, one for each PR. So, a PR with name `fix-db-query` is deployed to a namespace called `fix-db-query`, and a PR with name `JIRA-1434` is deployed to a namespace called `JIRA-1434` and so on. - -* Expose the environment URL - The second option is to expose the environment URL so that developers and testers can actually preview the application -deployment either manually or via automated tests. - The two major approaches here are with host-based and path-based URLs: - * For host-based URLs, the test environments are named `pr1.example.com`, `pr2.example.com` and so on - * For path-based URLs, the test environments are named `example.com/pr1`, `example.com/pr2` and so on - - Both approaches have advantages and disadvantages. Path-based URLs are easier to set up, but may not work with all applications, as they change the web context. Host-based URLs are more robust but need extra DNS configuration for the full effect. - - In Kubernetes clusters, you can set up both types of URLs via [an Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/){:target="\_blank"}. - -## Example application - -You can find the application we will use at [https://github.com/codefresh-contrib/unlimited-test-environments-source-code](https://github.com/codefresh-contrib/unlimited-test-environments-source-code){:target="\_blank"}. -It is a standard Java/Spring Boot application that includes the following characteristics: - -* It has [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) that can be targeted at any host/port. We will use those tests as smoke tests that verify the preview environment after it is deployed -* It comes bundled in [a Helm chart](https://github.com/codefresh-contrib/unlimited-test-environments-manifests){:target="\_blank"} -* It has an ingress configuration ready for path-based URLs - -We are using [the Ambassador gateway](https://www.getambassador.io/){:target="\_blank"} as an ingress for this example, but you can use any Kubernetes-compliant ingress. - -Here is the [ingress manifest](https://github.com/codefresh-contrib/unlimited-test-environments-manifests/blob/main/simple-java-app/templates/ingress.yaml){:target="\_blank"}. - -{% highlight yaml %} -{% raw %} -kind: Ingress -apiVersion: extensions/v1beta1 -metadata: - name: "simple-java-app-ing" - annotations: - kubernetes.io/ingress.class: {{ .Values.ingress.class }} - -spec: - rules: - - http: - paths: - - path: {{ .Values.ingress.path }} - backend: - serviceName: simple-service - servicePort: 80 -{% endraw %} -{% endhighlight %} - -The path of the application is configurable and can be set at deploy time. - -## Creating preview environments for each PR - -Each time a PR is created, we want to perform the following tasks: - -1. Compile the application and run unit tests. -1. Run security scans, quality checks, and everything else we need to decide if the PR is valid. -1. Create a namespace with the same name as the PR branch. Deploy the PR and expose it as a URL that has the same name as the branch (see the sketch below).
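Stripped of pipeline context, the namespace-per-branch idea boils down to a couple of commands. Here is a minimal sketch, assuming `kubectl` and Helm access to the cluster, the branch name in the Codefresh-provided `CF_BRANCH_TAG_NORMALIZED` variable, and a hypothetical release/chart name:

```
# Create a namespace named after the branch (idempotent thanks to dry-run + apply)
kubectl create namespace "$CF_BRANCH_TAG_NORMALIZED" --dry-run=client -o yaml | kubectl apply -f -

# Install (or upgrade) the PR build in that namespace
helm upgrade --install my-preview-release ./my-chart --namespace "$CF_BRANCH_TAG_NORMALIZED"
```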
- -Here is an example pipeline that does all these tasks: - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/pull-request-preview-pipeline.png" -url="/images/guides/preview-environments/pull-request-preview-pipeline.png" -alt="Pull Request preview pipeline" -caption="Pull Request preview pipeline" -max-width="100%" -%} - -This pipeline has the following steps: - -1. A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to fetch the source code of the application. -1. A [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that runs Maven for compilation and unit tests. -1. A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create the Docker image of the application. -1. A step that scans the source code for security issues with [Snyk](https://snyk.io/){:target="\_blank"}. -1. A step that scans the container image [for security issues]({{site.baseurl}}/docs/testing/security-scanning/) with [trivy](https://github.com/aquasecurity/trivy){:target="\_blank"}. -1. A step that runs [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) by launching the app in a [service container]({{site.baseurl}}/docs/pipelines/service-containers/). -1. A step for [Sonar analysis]({{site.baseurl}}/docs/testing/sonarqube-integration/). -1. A step that clones [a second Git repository](https://github.com/codefresh-contrib/unlimited-test-environments-manifests){:target="\_blank"} with the [Helm chart]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) of the application. -1. A step that deploys the source code to a new namespace. -1. A step that [adds a comment on the PR](https://codefresh.io/steps/step/kostis-codefresh%2Fgithub-pr-comment){:target="\_blank"} with the URL of the temporary environment. -1. A step that runs smoke tests against the temporary test environment. - -Note that the integration tests and security scans are just examples of what you can do before the PR is deployed. You can insert your own steps that check the content of a PR. 
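Such a custom gate can be as simple as a freestyle step that fails the build when a project-specific rule is violated. Here is a minimal sketch (the step name and the size policy are hypothetical, and it assumes a full clone so that `origin/master` is available):

{% highlight yaml %}
{% raw %}
  enforce_pr_policy:
    title: Rejecting oversized PRs
    image: alpine/git
    commands:
      # Hypothetical policy: fail the build if the PR changes more than 50 files
      - test "$(git diff --name-only origin/master...HEAD | wc -l)" -le 50
{% endraw %}
{% endhighlight %}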
- -Here is the complete YAML definition: - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - "prepare" - - "verify" - - "deploy" - -steps: - main_clone: - title: "Cloning repository" - type: "git-clone" - repo: "codefresh-contrib/unlimited-test-environments-source-code" - revision: "${{CF_REVISION}}" - stage: "prepare" - - run_unit_tests: - title: Compile/Unit test - stage: prepare - image: 'maven:3.5.2-jdk-8-alpine' - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package - build_app_image: - title: Building Docker Image - type: build - stage: prepare - image_name: kostiscodefresh/spring-actuator-sample-app - working_directory: ./ - tag: '${{CF_BRANCH}}' - dockerfile: Dockerfile - scan_code: - title: Source security scan - stage: verify - image: 'snyk/snyk-cli:maven-3.6.3_java11' - commands: - - snyk monitor - scan_image: - title: Container security scan - stage: verify - image: 'aquasec/trivy' - commands: - - trivy image docker.io/kostiscodefresh/spring-actuator-sample-app:${{CF_BRANCH}} - run_integration_tests: - title: Integration tests - stage: verify - image: maven:3.5.2-jdk-8-alpine - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://my-spring-app -Dsonar.organization=kostis-codefresh-github - services: - composition: - my-spring-app: - image: '${{build_app_image}}' - ports: - - 8080 - readiness: - timeoutSeconds: 30 - periodSeconds: 15 - image: byrnedo/alpine-curl - commands: - - "curl http://my-spring-app:8080/" - sonar_scan: - title: Sonar Scan - stage: verify - image: 'maven:3.8.1-jdk-11-slim' - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository sonar:sonar -Dsonar.login=${{SONAR_TOKEN}} -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=kostis-codefresh-github - clone: - title: "Cloning repository" - type: "git-clone" - repo: "codefresh-contrib/unlimited-test-environments-manifests" - revision: main - stage: "deploy" - deploy: - title: Deploying Helm Chart - type: helm - stage: deploy - working_directory: ./unlimited-test-environments-manifests - arguments: - action: install - chart_name: simple-java-app - release_name: my-spring-app - helm_version: 3.2.4 - kube_context: myawscluster - namespace: ${{CF_BRANCH_TAG_NORMALIZED}} - cmd_ps: '--create-namespace --wait --timeout 5m' - custom_values: - - 'image_tag=${{CF_BRANCH_TAG_NORMALIZED}}' - - 'replicaCount=3' - - 'ingress_path=/${{CF_BRANCH_TAG_NORMALIZED}}/' - add_pr_comment: - title: Adding comment on PR - stage: deploy - type: kostis-codefresh/github-pr-comment - fail_fast: false - arguments: - PR_COMMENT_TEXT: "[CI] Staging environment is at https://kostis.sales-dev.codefresh.io/${{CF_BRANCH_TAG_NORMALIZED}}/" - GIT_PROVIDER_NAME: 'github-1' - run_smoke_tests: - title: Smoke tests - stage: deploy - image: maven:3.5.2-jdk-8-alpine - working_directory: "${{main_clone}}" - fail_fast: false - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=https://kostis.sales-dev.codefresh.io/${{CF_BRANCH_TAG_NORMALIZED}}/ -Dserver.port=443 -{% endraw %} -{% endhighlight %} - -The end result of the pipeline is a deployment to the path that has the same name as the PR branch. 
For -example, if my branch is named `demo`, then a `demo` namespace is created on the cluster and the application -is exposed on the `/demo/` context: - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/demo-path.png" -url="/images/guides/preview-environments/demo-path.png" -alt="Temporary environment" -caption="Temporary environment" -max-width="100%" -%} - -The environment is also mentioned as a comment in the PR UI in GitHub: - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/pull-request-comment.png" -url="/images/guides/preview-environments/pull-request-comment.png" -alt="Pull Request comment" -caption="Pull Request comment" -max-width="100%" -%} - -As explained in [pull Requests]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/), we want to make this pipeline applicable only -to a PR-open event and PR-sync events that capture commits on an existing pull request. - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/pr-events.png" -url="/images/guides/preview-environments/pr-events.png" -alt="Git events for a Pull Request preview pipeline" -caption="Git events for a Pull Request preview pipeline" -max-width="100%" -%} - -Therefore, you need to set up your [pipeline triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) with the same options selected as shown in the picture above. - -## Cleaning up temporary environments - -Creating temporary environments is very convenient for developers, but can be very costly for your infrastructure if you use a cloud -provider for your cluster. For cost reasons and better resource utilization, it is best to destroy temporary environments that are no longer used. - -While you can run a batch job that automatically deletes old temporary environments, the optimal approach is to delete them as soon as the respective PR is closed. - -We can do that with a very simple pipeline that has only one step: - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/pull-request-closed-pipeline.png" -url="/images/guides/preview-environments/pull-request-closed-pipeline.png" -alt="Pipeline when a Pull Request is closed" -caption="Pipeline when a Pull Request is closed" -max-width="100%" -%} - -Here is the pipeline definition: - - `codefresh-close.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -steps: - delete_app: - title: Delete app - type: helm - arguments: - action: auth - helm_version: 3.2.4 - kube_context: myawscluster - namespace: ${{CF_BRANCH_TAG_NORMALIZED}} - commands: - - helm delete my-spring-app --namespace ${{CF_BRANCH_TAG_NORMALIZED}} - - kubectl delete namespace ${{CF_BRANCH_TAG_NORMALIZED}} -{% endraw %} -{% endhighlight %} - -The pipeline just uninstalls the Helm release for that namespace, and then deletes the namespace itself. - -To have this pipeline run only when a PR is closed, here are the [triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) to select: - -{% include image.html -lightbox="true" -file="/images/guides/preview-environments/close-events.png" -url="/images/guides/preview-environments/close-events.png" -alt="Git events for a Pull Request close pipeline" -caption="Git events for a Pull Request close pipeline" -max-width="100%" -%} - -With this setup, the pipeline runs when the PR is closed, regardless of whether it was merged or not (which is exactly what you want as in both cases the test environment is not needed anymore). 
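If you want an extra safety net for preview environments whose close event was somehow missed, you can also run a scheduled cleanup pipeline. Here is a minimal sketch, assuming GNU `date` in the step image and a (hypothetical) `purpose=preview` label applied to preview namespaces when they are created:

```
# Delete preview namespaces that are older than 7 days
for ns in $(kubectl get ns -l purpose=preview -o jsonpath='{.items[*].metadata.name}'); do
  created=$(kubectl get ns "$ns" -o jsonpath='{.metadata.creationTimestamp}')
  age_days=$(( ( $(date +%s) - $(date -d "$created" +%s) ) / 86400 ))
  if [ "$age_days" -gt 7 ]; then
    kubectl delete namespace "$ns"
  fi
done
```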
- -## Viewing all environments in the Codefresh UI - -You can combine the pipeline above with any Codefresh UI dashboard if you want to see all your temporary environments in a single view. - -For more information, see: -* [Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) -* [Helm promotion dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) -* [GitOps dashboard]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/#working-with-the-gitops-dashboard) - - - -## Related articles -[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) -[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) -[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) -[Working with Docker registries]({{site.baseurl}}/docs/integrations/docker-registries/) - - - - - - diff --git a/_docs/ci-cd-guides/progressive-delivery.md b/_docs/ci-cd-guides/progressive-delivery.md deleted file mode 100644 index 11780fdce..000000000 --- a/_docs/ci-cd-guides/progressive-delivery.md +++ /dev/null @@ -1,958 +0,0 @@ ---- -title: "Progressive Delivery" -description: "Perform zero downtime deployments with Argo Rollouts" -group: ci-cd-guides -toc: true ---- - -Progressive Delivery is the practice of deploying an application in a gradual manner, allowing for minimum downtime and easy rollbacks. There are several forms of progressive delivery such as blue/green, canary, A/B, and feature flags. - -Codefresh can easily integrate with [Argo Rollouts](https://argoproj.github.io/argo-rollouts/){:target="\_blank"}, a Kubernetes operator that natively supports deployment practices for progressive delivery. - -## Installing the Argo Rollouts operator to your cluster - -To install Argo Rollouts, follow the [installation instructions](https://argoproj.github.io/argo-rollouts/installation/){:target="\_blank"}. Essentially, you need a terminal with `kubectl` access to your cluster. - -``` -kubectl create namespace argo-rollouts -kubectl apply -n argo-rollouts -f https://raw.githubusercontent.com/argoproj/argo-rollouts/stable/manifests/install.yaml -``` - -You can optionally install the [CLI locally](https://github.com/argoproj/argo-rollouts/releases/latest){:target="\_blank"}, if you want to have more visibility into your deployments. - - -## Blue/Green deployments - -Blue/Green deployments are one of the simplest ways to minimize deployment downtime. Blue/Green deployments are not specific to Kubernetes, and can be used even for traditional applications that reside on Virtual Machines. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/how-blue-green-works.png" -url="/images/guides/progressive-delivery/how-blue-green-works.png" -alt="Blue/Green Deployments" -caption="Blue/Green Deployments" -max-width="50%" -%} - -1. At first all users of the application are routed to the current version (shown in blue). A key point is that all traffic passes through a load balancer. -1. A new version is deployed (shown in green). As this version does not receive any live traffic, all users are still served by the previous/stable version. -1. Developers can internally test the new green version and verify its validity. If it is valid, traffic is switched to that new version. -1. If everything goes well, the old version is completely discarded. We are back to the initial state (order of colors does not matter).
- -The major benefit of this pattern is that if at any point in time the new version has issues, all users can be switched back to the previous version (via the load balancer). Switching via the load balancer is much faster than redeploying a new version, resulting in minimum disruption for existing users. - -There are several variations of this pattern. In some cases, the old color is never destroyed but keeps running in the background. You can also retain even older versions online, maybe with a smaller footprint, allowing for easy switching to any previous application revision. - -### Blue/Green Kubernetes Deployment with Argo Rollouts - -In addition to supporting the basic blue/green pattern described in the previous section, Argo Rollouts offers a wealth of [customization options](https://argoproj.github.io/argo-rollouts/features/bluegreen/){:target="\_blank"}. -One of the most important additions is the ability to "test" the upcoming color by introducing a "preview" [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/){:target="\_blank"}, in addition to the service used for live traffic. -This preview service can be used by the team that performs the deployment to verify the new version before actually switching the traffic. - - -Here is the initial state of a deployment. The example uses two pods (shown as `xnsdx` and `jftql` in the diagram). - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/01_initial.png" -url="/images/guides/progressive-delivery/01_initial.png" -alt="Initial deployment. All services point to active version" -caption="Initial deployment. All services point to active version" -max-width="90%" -%} - -There are two Kubernetes services: -* A `rollout-bluegreen-active` service that captures all live traffic from actual users of the application (internet traffic coming from `51.141.221.40`). -* A secondary service called `rollout-bluegreen-preview`. Under normal circumstances it also points to the same live version. - - -Once a deployment starts, a new "color" is created. In the example we have two new pods that represent the next version of the application to be deployed (shown as `9t67t` and `7vs2m`). - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/02_two_colors.png" -url="/images/guides/progressive-delivery/02_two_colors.png" -alt="Deployment in progress. Active users see old version. Internal users preview new version" -caption="Deployment in progress. Active users see old version. Internal users preview new version" -max-width="90%" -%} - -The important point here is that the normal "active" service still points to the old version, while the "preview" service points to the new pods. This means that all active users are still on the old/stable deployment, while internal teams can use the "preview" service to test the new deployment. - -If everything goes well, the next version is promoted to be the active version. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/03_switch_traffic.png" -url="/images/guides/progressive-delivery/03_switch_traffic.png" -alt="Next application version is promoted. All users see new version" -caption="Next application version is promoted. All users see new version" -max-width="90%" -%} - -Here both services point to the new version. This is also the critical moment for all actual users of the application, as they are now switched to the new version of the application.
The old version is still around but no traffic is sent to it. - -Having the old version around is a great failsafe, as one can abort the deployment process and switch back all active users to the old deployment in the fastest way possible. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/04_scale_down.png" -url="/images/guides/progressive-delivery/04_scale_down.png" -alt="Old application version is discarded. Only new version remains." -caption="Old application version is discarded. Only new version remains." -max-width="90%" -%} - -After the configured duration, as [defined in Argo Rollouts](https://argoproj.github.io/argo-rollouts/features/bluegreen/#scaledowndelayseconds){:target="\_blank"}, the old version is scaled down completely to preserve resources. We are now back -to the same configuration as the initial state, and the next deployment will follow the same sequence of events. - - -### Example application - -You can find an example application at [https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app](https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app){:target="\_blank"}, that also includes simple integration tests. - -Notice that the first deployment of your application will NOT follow the blue/green deployment process as there is no "previous" color. So you need to deploy it at least once. - -``` -git clone https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app.git -cd argo-rollout-blue-green-sample-app -kubectl create ns blue-green -kubectl apply -f ./blue-green-manual-approval -n blue-green -``` - -You can then monitor what Argo Rollouts is doing with the following command: - -``` -kubectl argo rollouts get rollout spring-sample-app-deployment --watch -n blue-green -``` - -### Blue/Green deployment with manual approval - -A quick way to use blue/green deployments is by simply inserting [an approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) before the traffic switch step. -This will pause the pipeline and the developers or QA can test the next version on their own before any live users are redirected to it. - -Here is an example pipeline: - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/approval-pipeline.png" -url="/images/guides/progressive-delivery/approval-pipeline.png" -alt="Manual approval before traffic switch" -caption="Manual approval before traffic switch" -max-width="100%" -%} - -This pipeline does the following: - -1. [Clones]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) the source code of the application. -1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image. -1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest, and creates a new "color" for the next version -1. The pipeline is paused and waits for an [approval/rejection]({{site.baseurl}}/docs/pipelines/steps/approval/#getting-the-approval-result) by a human user. -1. If the pipeline is approved, the new color is promoted, and becomes the new active version. -1. If the pipeline is rejected, the new color is discarded, and all live users are not affected in any way. 
- -Here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - prepare - - build - - deploy - - finish -steps: - clone: - type: "git-clone" - stage: prepare - description: "Cloning main repository..." - repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' - revision: "${{CF_BRANCH}}" - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: kostiscodefresh/argo-rollouts-blue-green-sample-app - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: Dockerfile - start_deployment: - title: Deploying new color - stage: deploy - image: codefresh/cf-deploy-kubernetes:master - working_directory: "${{clone}}" - commands: - - /cf-deploy-kubernetes ./blue-green-manual-approval/service.yaml - - /cf-deploy-kubernetes ./blue-green-manual-approval/service-preview.yaml - - /cf-deploy-kubernetes ./blue-green-manual-approval/rollout.yaml - environment: - - KUBECONTEXT=mydemoAkscluster@BizSpark Plus - - KUBERNETES_NAMESPACE=blue-green - wait_for_new_color: - fail_fast: false - type: pending-approval - title: Is the new color ok? - stage: deploy - promote_color: - title: Switching traffic to new color - stage: finish - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 promote spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: wait_for_new_color - on: - - approved - abort_deployment: - title: Keeping the existing color - stage: finish - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 undo spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: wait_for_new_color - on: - - denied -{% endraw %} -{% endhighlight %} - -Just before the approval, you can optionally execute the Argo Rollouts CLI to see what is happening behind the scenes: - -``` -kubectl argo rollouts get rollout spring-sample-app-deployment --watch -n blue-green -``` - -It should show the new color come up, but not accepting any traffic. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/monitor-argo-rollouts.png" -url="/images/guides/progressive-delivery/monitor-argo-rollouts.png" -alt="Argo Rollouts CLI" -caption="Argo Rollouts CLI" -max-width="100%" -%} - -Once the deployment is complete, the old pods are destroyed after 30 seconds (this is the default value of Argo Rollouts). - - - -### Blue/Green deployment with smoke tests - -Using manual approval before promoting the new version is a great starting point. To truly achieve continuous deployment, one should automate the testing process and eliminate the human approval. - -There are many approaches on testing a release, and each organization will have a different set of "tests" that verify the next version of the software. Argo Rollouts -has [several integrations](https://argoproj.github.io/argo-rollouts/features/analysis/){:target="\_blank"} either with metric providers or [simple Kubernetes jobs](https://argoproj.github.io/argo-rollouts/analysis/job/){:target="\_blank"} that can run integration tests or collect metrics and decide if the next color should be promoted or not. - -Another alternative is to simply execute [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) from within Codefresh. 
This is great if your integration tests need access to the source code or other external services that are accessible only to Codefresh. - -We can modify the previous pipeline to include automated smoke tests that are already part of the [example application](https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app/blob/main/src/test/java/sample/actuator/HealthIT.java){:target="\_blank"}. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/smoke-tests-pipeline.png" -url="/images/guides/progressive-delivery/smoke-tests-pipeline.png" -alt="Smoke tests before traffic switch" -caption="Smoke tests before traffic switch" -max-width="100%" -%} - -This pipeline does the following: - -1. [Clones]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) the source code of the application. -1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image. -1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest and creates a new "color" for the next version. -1. Runs integration tests against the "preview" service created by Argo Rollouts. Live users are still on the previous/stable version of the application. -1. If smoke tests pass, the new color is promoted and becomes the new active version. -1. If smoke tests fail, the new color is discarded and all live users are not affected in any way. - -Here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - prepare - - build - - deploy - - finish -steps: - clone: - type: "git-clone" - stage: prepare - description: "Cloning main repository..."
- repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' - revision: "${{CF_BRANCH}}" - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: kostiscodefresh/argo-rollouts-blue-green-sample-app - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: Dockerfile - start_deployment: - title: Deploying new color - stage: deploy - image: codefresh/cf-deploy-kubernetes:master - working_directory: "${{clone}}" - commands: - - /cf-deploy-kubernetes ./blue-green-manual-approval/service.yaml - - /cf-deploy-kubernetes ./blue-green-manual-approval/service-preview.yaml - - /cf-deploy-kubernetes ./blue-green-manual-approval/rollout.yaml - environment: - - KUBECONTEXT=mydemoAkscluster@BizSpark Plus - - KUBERNETES_NAMESPACE=blue-green - run_integration_tests: - title: Smoke tests - stage: deploy - image: maven:3.5.2-jdk-8-alpine - working_directory: "${{clone}}" - fail_fast: false - commands: - - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://13.86.102.74 -Dserver.port=80 - promote_color: - title: Switching traffic to new color - stage: finish - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 promote spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: run_integration_tests - on: - - success - abort_deployment: - title: Keeping the existing color - stage: finish - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 undo spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: run_integration_tests - on: - - failure -{% endraw %} -{% endhighlight %} - -You can optionally execute the Argo Rollouts CLI to see what is happening behind the scenes: - -``` -kubectl argo rollouts get rollout spring-sample-app-deployment --watch -n blue-green -``` - ->For the sake of simplicity, we have hardcoded the load balancer for the preview service at 13.86.102.74. For an actual application, you would have a DNS name such as `preview.example.com`, or use another `kubectl command` to fetch the endpoint of the load balancer dynamically. Also, our integration tests assume that the application is already deployed, before they run. If your application takes too much time to deploy, you need to make sure that it is up before the tests actually run. - - -The end result is a continuous deployment pipeline, where all release candidates that don't pass tests never reach production. - -## Canary deployments - -Blue/Green deployments are great for minimizing downtime after a deployment, but they are not perfect. If your new version has a hidden issue that manifests itself only after some time (i.e. it is not detected by your smoke tests), then **all** your users will be affected, because the traffic switch is all or nothing. - -An improved deployment method is canary deployments. These function similar to blue/green, but instead of switching 100% of live traffic all at once to the new version, you can instead move only a subset of users. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/how-canary-deployments-work.png" -url="/images/guides/progressive-delivery/how-canary-deployments-work.png" -alt="Canary Deployments" -caption="Canary Deployments" -max-width="50%" -%} - -1. At the start, all users of the application are routed to the current version (shown in blue). 
A key point is that all traffic passes through a load balancer. -1. A new version is deployed (shown in green). This version gets only a very small amount of live traffic (for example 10%). -1. Developers can test internally and monitor their metrics to verify the new release. If they are confident, they can redirect more traffic to the new version (for example 33%). -1. If everything goes well, the old version is completely discarded. All traffic is now redirected to the new version. We are back to the initial state (order of colors does not matter). - -The major benefit of this pattern is that if at any point in time the new version has issues, only a small subset of live users are affected. And like blue/green deployments, performing a rollback is as easy as resetting the load balancer to send no traffic to the canary version. Switching the load balancer is much faster than redeploying a new version, resulting in minimum disruption for existing users. - -There are several variations of this pattern. The amount of live traffic that you send to the canary at each step as well as the number of steps are user configurable. A simple approach would have just two steps (10%, 100%), while a more complex one could move traffic in a gradual way (10%, 30%, 60%, 90%, 100%). - ->Canary deployments are more advanced than blue/green deployments, and are also more complex to set up. The load balancer is now much smarter as it can handle two streams of traffic at the same time with different destinations of different weights. You also need a way (usually an API) to instruct the load balancer to change the weight distribution of the traffic streams. If you are just getting started with progressive delivery, we suggest you master blue/green deployments first, before adopting canaries. - -### Canary Deployment with Argo Rollouts - -Argo Rollouts supports the basic canary pattern described in the previous section, and also offers a wealth of [customization options](https://argoproj.github.io/argo-rollouts/features/canary/){:target="\_blank"}. -One of the most important -additions is the ability to "test" the upcoming version by introducing a "preview" [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/){:target="\_blank"}, in addition to the service used for live traffic. -This preview service can be used by the team that performs the deployment to verify the new version as it gets used by the subset of live users. - - -Here is the initial state of a deployment. The example uses four pods (shown as `22nqx`, `nqksq`, `8bzwh` and `jtdcc` in the diagram). - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/01_canary_initial_state.png" -url="/images/guides/progressive-delivery/01_canary_initial_state.png" -alt="Initial deployment. All services point to active version" -caption="Initial deployment. All services point to active version" -max-width="90%" -%} - -There are now three Kubernetes services: -* A `rollout-canary-all-traffic` service that captures all live traffic from actual users of the application (internet traffic coming from `20.37.135.240`). -* A secondary service, `rollout-canary-active`, that always points to the stable/previous version of the software. -* A third service, `rollout-canary-preview`, that only routes traffic to the canary/new versions. - -Under normal circumstances, all three services point to the same version. - - -Once a deployment starts, a new "version" is created.
In the example we have one new pod that represents the next version of the application to be deployed (shown as `9wx8w` at the top of the diagram). - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/02_canary_10.png" -url="/images/guides/progressive-delivery/02_canary_10.png" -alt="Deployment in progress. 10% of users are sent to the canary version" -caption="Deployment in progress. 10% of users are sent to the canary version" -max-width="90%" -%} - -The important point here is that the service used by live users (called `rollout-canary-all-traffic`) routes traffic to **both** the canary and the previous version. It is not visible in the diagram, but only 10% of traffic is sent to the single pod that hosts the new version, while 90% of traffic goes to the four pods of the old version. - -The `rollout-canary-preview` service goes only to the canary pod. You can use this service to examine metrics from the canary or even give it to users who always want to try the new version first (e.g. your internal developers). On the other hand, the `rollout-canary-active` service always goes to the stable version. You can use that for users who never want to try the new version first or for verifying how something worked in the previous version. - - - -If everything goes well, and you are happy with how the canary works, you can redirect some more traffic to it. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/03_canary_33.png" -url="/images/guides/progressive-delivery/03_canary_33.png" -alt="Deployment in progress. 33% of users are sent to the canary version" -caption="Deployment in progress. 33% of users are sent to the canary version" -max-width="90%" -%} - -We are now sending 33% of live traffic to the canary (the traffic weights are not visible in the picture). To accommodate the extra traffic, the canary version now has two pods instead of one. This is another great feature of Argo Rollouts: the number of pods in the canary is completely unrelated to the amount of traffic that is sent to it. You can have any combination you can think of (e.g. 10% of traffic to five pods, or 50% of traffic to three pods and so on). It all depends on the resources used by your application. - -It makes sense of course to gradually increase the number of pods in the canary as you shift more traffic to it. - -Having the old version around is a great failsafe, as one can abort the deployment process and switch back all active users to the old deployment in the fastest way possible -by simply telling the load balancer to move 100% of traffic back to the previous version. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/04_canary_finished.png" -url="/images/guides/progressive-delivery/04_canary_finished.png" -alt="Old application version is discarded. Only new version remains." -caption="Old application version is discarded. Only new version remains." -max-width="90%" -%} - -Two more pods are launched for the canary (for a total of four), and finally we can shift 100% of live traffic to it. After some time, the old version is scaled down completely to preserve resources. We are now back -to the same configuration as the initial state, and the next deployment will follow the same sequence of events.
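The failsafe described above is a single command when the Argo Rollouts CLI is installed. Here is a sketch, using the rollout name and namespace of the example application that follows:

```
# Abort the canary and send 100% of traffic back to the stable version
kubectl argo rollouts abort golang-sample-app-deployment -n canary
```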
- -### Example application - -You can find an example application at [https://github.com/codefresh-contrib/argo-rollout-canary-sample-app](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app){:target="\_blank"} that also includes simple metrics (we will use them in the second example with canaries). - -Notice that the first deployment of your application will NOT follow the canary deployment process as there is no "previous" version. So you need to deploy it at least -once. - -``` -git clone https://github.com/codefresh-contrib/argo-rollout-canary-sample-app.git -cd argo-rollout-canary-sample-app -kubectl create ns canary -kubectl apply -f ./canary-manual-approval -n canary -``` - -You can then monitor what argo rollouts is doing with the following command: - -``` -kubectl argo rollouts get rollout golang-sample-app-deployment --watch -n canary -``` - -### Choosing a solution for Traffic Management - -Unlike Blue/Green deployments, canary deployments require a smarter way to handle incoming traffic to your application. Specifically for Kubernetes, you need a networking solution that can split traffic according to percentages. Kubernetes on its own performs simple load balancing where the number of pods affects the traffic they get. But that is not enough for canary deployments. - -Argo Rollouts has [several integrations](https://argoproj.github.io/argo-rollouts/features/traffic-management/){:target="\_blank"} with Service Meshes and ingresses that can be used for Traffic Splits. - -Apart from the platforms that are supported natively by Argo Rollouts, you can also use any solution that implements the [Service Mesh Interface (SMI)](https://smi-spec.io/){:target="\_blank"}, a common -standard for service mesh implementations. Argo Rollouts [adheres to the SMI spec](https://argoproj.github.io/argo-rollouts/features/traffic-management/smi/){:target="\_blank"}, and can instruct any compliant solution for the traffic split process during canaries. - -In our example we are using [LinkerD](https://linkerd.io/){:target="\_blank"}, an open source service mesh solution for Kubernetes that also implements SMI. -You can install LinkerD by following [the official documentation](https://linkerd.io/2.10/getting-started/){:target="\_blank"} in your cluster and then making sure that your application is [meshed](https://linkerd.io/2.10/tasks/adding-your-service/){:target="\_blank"} (i.e. it is managed by LinkerD) by adding the special annotation [linkerd.io/inject:enabled](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/canary-manual-approval/rollout.yaml#L36){:target="\_blank"} in the rollout YAML. - - -### Canary deployment with manual approval - -As with Blue/Green deployments, the easiest way to use canaries is by simply inserting [an approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) before each subsequent traffic switch step. -This will pause the pipeline and the developers or QA team can evaluate the canary stability. 
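In pipeline terms, such a pause is just a pending-approval step; a minimal sketch (the step name and title are illustrative):

{% highlight yaml %}
{% raw %}
  check_canary:
    fail_fast: false
    type: pending-approval
    title: Is the canary ok?
{% endraw %}
{% endhighlight %}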
- -Here is the [Canary setup](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/canary-manual-approval/rollout.yaml#L8){:target="\_blank"}: - -`rollout.yaml` (excerpt) -```yaml -spec: - replicas: 4 - strategy: - canary: - canaryService: rollout-canary-preview - stableService: rollout-canary-active - trafficRouting: - smi: - trafficSplitName: rollout-example-traffic-split - rootService: rollout-canary-all-traffic - steps: - - setWeight: 10 - - setCanaryScale: - weight: 25 - - pause: {} - - setWeight: 33 - - setCanaryScale: - weight: 50 - - pause: {} -``` - -The canary has essentially three stages. At the beginning, it gets only 10% of the traffic and then the rollout pauses. At this point the canary is scaled to 25% of the pods of a full deployment. Then, -if we promote it, it gets 33% of the traffic and is scaled up to 50% of the pods of a full deployment. We pause again, and then finally it gets 100% of -live traffic. - - -Here is the pipeline with canary steps: - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/canary-manual-approval-pipeline.png" -url="/images/guides/progressive-delivery/canary-manual-approval-pipeline.png" -alt="Manual approval with two intermediate canary steps" -caption="Manual approval with two intermediate canary steps" -max-width="100%" -%} - -This pipeline does the following: - -1. [Clones]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) the source code of the application. -1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image. -1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest and creates a new version. 10% of live traffic is redirected to it. -1. The pipeline is paused and waits for an [approval/rejection]({{site.baseurl}}/docs/pipelines/steps/approval/#getting-the-approval-result) by a human user. -1. If the pipeline is approved, 33% of traffic is now sent to the canary. If the pipeline is rejected, the canary is discarded and all traffic goes back to the stable version. -1. In the next pause, the pipeline waits for a second approval. -1. If the pipeline is approved, all traffic is now sent to the canary. If the pipeline is rejected, the canary is discarded and all traffic goes back to the stable version. - -Here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): - - `codefresh.yml` -{% highlight yaml %} -{% raw %} -version: "1.0" -stages: - - prepare - - build - - 'canary 10%' - - 'canary 33%' - - finish -steps: - clone: - type: "git-clone" - stage: prepare - description: "Cloning main repository..."
- repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' - revision: "${{CF_BRANCH}}" - build_app_image: - title: Building Docker Image - type: build - stage: build - image_name: kostiscodefresh/argo-rollouts-canary-sample-app - working_directory: "${{clone}}" - tags: - - "latest" - - '${{CF_SHORT_REVISION}}' - dockerfile: Dockerfile - start_deployment: - title: Deploy to 10% of live traffic - stage: 'canary 10%' - image: codefresh/cf-deploy-kubernetes:master - working_directory: "${{clone}}" - commands: - - /cf-deploy-kubernetes ./canary-manual-approval/service.yaml - - /cf-deploy-kubernetes ./canary-manual-approval/service-preview.yaml - - /cf-deploy-kubernetes ./canary-manual-approval/service-all.yaml - - /cf-deploy-kubernetes ./canary-manual-approval/rollout.yaml - environment: - - KUBECONTEXT=mydemoAkscluster@BizSpark Plus - - KUBERNETES_NAMESPACE=canary - check_canary_10: - fail_fast: false - type: pending-approval - title: Is canary ok? - stage: 'canary 10%' - promote_canary_33: - title: Switching 33% traffic to canary - stage: 'canary 33%' - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 promote golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: check_canary_10 - on: - - approved - abort_deployment_10: - title: Discarding canary at 10% - stage: 'canary 10%' - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 undo golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: check_canary_10 - on: - - denied - exit_10: - title: Stopping pipeline - stage: 'canary 10%' - image: alpine:3.9 - commands: - - echo "Canary failed" - - exit 1 - when: - steps: - - name: check_canary_10 - on: - - denied - check_canary_33: - fail_fast: false - type: pending-approval - title: Is canary ok? - stage: 'canary 33%' - promote_canary_full: - title: Switching all traffic to canary - stage: finish - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 promote golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: check_canary_33 - on: - - approved - abort_deployment_33: - title: Discarding canary at 33% - stage: 'canary 33%' - image: kostiscodefresh/kubectl-argo-rollouts:latest - commands: - - /app/kubectl-argo-rollouts-linux-amd64 undo golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus" - when: - steps: - - name: check_canary_33 - on: - - denied - exit_33: - title: Stopping pipeline - stage: 'canary 33%' - image: alpine:3.9 - commands: - - echo "Canary failed" - - exit 1 - when: - steps: - - name: check_canary_33 - on: - - denied -{% endraw %} -{% endhighlight %} - -Just before the approval, you can optionally execute the Argo Rollouts CLI to see what is happening behind the scenes: - -``` -kubectl argo rollouts get rollout golang-sample-app-deployment --watch -n canary -``` - -It should show the status of the canary pods with the amount of traffic that is redirected to it. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/canary-watch.png" -url="/images/guides/progressive-delivery/canary-watch.png" -alt="Argo Rollouts CLI" -caption="Argo Rollouts CLI" -max-width="100%" -%} - -In the above picture, the canary deployment has just started. There is only one pod for the canary that gets 10% of live traffic.
The four pods of the previous version still receive 90% of live traffic. - -You can also see the traffic split in the [LinkerD Dashboard](https://linkerd.io/2.10/reference/architecture/#dashboard){:target="\_blank"}: - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/canary-traffic-split.png" -url="/images/guides/progressive-delivery/canary-traffic-split.png" -alt="Linkerd Traffic split details" -caption="Linkerd Traffic split details" -max-width="80%" -%} - -The screenshot above is from the second stage of the canary where 33% of live traffic is redirected to the canary pods. -You can also get the same information from the command line with `kubectl get trafficsplit`. - -### Choosing a solution for automated metric analysis - -Canary deployments with manual pauses are a great way to get started, but they can quickly become cumbersome and error-prone. Ideally, the canary should automatically promote itself if the application "looks good". One of the most straightforward ways to examine application health is by reading its metrics and deciding on the progress of the canary in a completely automated way. - -There are two main sources for metrics that you can use: - -1. Application-specific metrics. This requires instrumentation in your application but is very powerful as you can query exactly what you want. -1. Cluster-level metrics (i.e. from the service mesh). These are very easy to set up, but are generic and deal mostly with the traffic the application receives. - - -Argo Rollouts has native integrations with [several metric providers](https://argoproj.github.io/argo-rollouts/features/analysis/){:target="\_blank"}. We will use Prometheus in our example. -The example application [is already instrumented](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/main.go#L51){:target="\_blank"} to expose some basic metrics. - -First, you need to install Prometheus by following [the official documentation](https://prometheus.io/docs/prometheus/latest/installation/){:target="\_blank"}. Then you need to make sure that Prometheus will actually scrape your application. Prometheus has [native service discovery for Kubernetes](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config){:target="\_blank"} but you need to enable it in the configuration. - -If you [install Prometheus with the Helm chart](https://github.com/prometheus-community/helm-charts){:target="\_blank"}, Kubernetes service discovery is already enabled. The only thing to set up is to add the `prometheus.io/scrape: "true"` annotation in your rollout so that Prometheus does not ignore your application. - -You can optionally install [Grafana](https://grafana.com/){:target="\_blank"} so that you can inspect your application metrics before using them in the canary process. The example application has a [basic dashboard](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/graphana/graphana-dashboard.json){:target="\_blank"} -that you can import: - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/graphana-dashboard.png" -url="/images/guides/progressive-delivery/graphana-dashboard.png" -alt="Prometheus metrics from the application" -caption="Prometheus metrics from the application" -max-width="90%" -%} - -Next you need a way to filter your metrics so that you can query only those from the canary pods and not the stable pods.
There are many ways to do this, but the easiest one is to simply have Argo Rollouts put special labels/tags in the canary pods. Then you can write any Prometheus query and focus only on the canary instances: - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/canary-metrics.png" -url="/images/guides/progressive-delivery/canary-metrics.png" -alt="Canary metrics during a deployment" -caption="Canary metrics during a deployment" -max-width="100%" -%} - -For the decision on how to promote the canary, you need to examine your application and decide which metrics you consider representative of the health of the application. -For our example we have a simple query that checks the number of successful calls (i.e. those that return HTTP code 200) against the total number of calls. Any value below 100% means that some calls are returning errors. - -{% include image.html -lightbox="true" -file="/images/guides/progressive-delivery/canary-decision.png" -url="/images/guides/progressive-delivery/canary-decision.png" -alt="The query that will promote or cancel the canary" -caption="The query that will promote or cancel the canary" -max-width="100%" -%} - -Note that Argo Rollouts can evaluate multiple queries when deciding if the canary is healthy or not. You are not constrained to a single query. - - -### Canary deployment with metric evaluation - -Once you have your metrics solution in place, you need to instruct Argo Rollouts to use it during a deployment. - -This happens with an [Analysis CRD](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/canary-with-metrics/analysis.yaml){:target="\_blank"}. - -`analysis.yaml` -```yaml -apiVersion: argoproj.io/v1alpha1 -kind: AnalysisTemplate -metadata: - name: success-rate -spec: - args: - - name: service-name - metrics: - - name: success-rate - interval: 2m - count: 2 - # NOTE: prometheus queries return results in the form of a vector. - # So it is common to access the index 0 of the returned array to obtain the value - successCondition: result[0] >= 0.95 - provider: - prometheus: - address: http://prom-release-prometheus-server.prom.svc.cluster.local:80 - query: sum(response_status{app="{{args.service-name}}",role="canary",status=~"2.*"})/sum(response_status{app="{{args.service-name}}",role="canary"}) -``` - -This Analysis template instructs Argo Rollouts to contact the internal Prometheus server every two minutes with a query that checks the successful HTTP calls -to the application. If the percentage of HTTP calls that return 200 is more than 95%, the canary is promoted. Otherwise, the canary fails. - -The Analysis can be reused by multiple deployments as the name of the service is configurable. The parameter is filled in the Rollout definition.
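You can sanity-check such a query against the Prometheus HTTP API before (or while) tuning the `successCondition`. Here is a sketch, assuming the in-cluster Prometheus address used above and a local port-forward:

```
# Forward the in-cluster Prometheus service to localhost:9090
kubectl port-forward -n prom svc/prom-release-prometheus-server 9090:80

# In another terminal, evaluate the success-rate query for the canary pods
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=sum(response_status{app="golang-sample-app",role="canary",status=~"2.*"})/sum(response_status{app="golang-sample-app",role="canary"})'
```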

`rollout.yaml` (excerpt)
```yaml
spec:
  replicas: 4
  strategy:
    canary:
      canaryService: rollout-canary-preview
      stableService: rollout-canary-active
      canaryMetadata:
        annotations:
          role: canary
        labels:
          role: canary
      trafficRouting:
        smi:
          trafficSplitName: rollout-example-traffic-split
          rootService: rollout-canary-all-traffic
      steps:
      - setWeight: 10
      - setCanaryScale:
          weight: 25
      - pause: {duration: 5m}
      - setWeight: 33
      - setCanaryScale:
          weight: 50
      - pause: {duration: 5m}
      analysis:
        templates:
        - templateName: success-rate
        startingStep: 4 # delay starting analysis run until the canary weight is 33%
        args:
        - name: service-name
          value: golang-sample-app
```

Here you can see that instead of waiting forever after each canary step, we wait for 5 minutes at 10% of traffic and 5 more minutes at 33% of traffic. During that time, the Prometheus Analysis runs automatically behind the scenes.

The Codefresh pipeline is now very simple:

{% include image.html
lightbox="true"
file="/images/guides/progressive-delivery/canary-metrics-pipeline.png"
url="/images/guides/progressive-delivery/canary-metrics-pipeline.png"
alt="Fully automated Canary pipeline"
caption="Fully automated Canary pipeline"
max-width="100%"
%}

This pipeline does the following:

1. [Clones]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) the source code of the application.
1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image.
1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest, creates a new version, and starts the canary process.

Here is the pipeline definition. For more information, see [What is the Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/):

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: "1.0"
stages:
  - prepare
  - build
  - deploy
steps:
  clone:
    type: "git-clone"
    stage: prepare
    description: "Cloning main repository..."
    repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
    revision: "${{CF_BRANCH}}"
  build_app_image:
    title: Building Docker Image
    type: build
    stage: build
    image_name: kostiscodefresh/argo-rollouts-canary-sample-app
    working_directory: "${{clone}}"
    tags:
    - "latest"
    - '${{CF_SHORT_REVISION}}'
    dockerfile: Dockerfile
    build_arguments:
    - git_hash=${{CF_SHORT_REVISION}}
  start_deployment:
    title: Start canary
    stage: deploy
    image: codefresh/cf-deploy-kubernetes:master
    working_directory: "${{clone}}"
    commands:
    - /cf-deploy-kubernetes ./canary-with-metrics/service.yaml
    - /cf-deploy-kubernetes ./canary-with-metrics/service-preview.yaml
    - /cf-deploy-kubernetes ./canary-with-metrics/service-all.yaml
    - /cf-deploy-kubernetes ./canary-with-metrics/analysis.yaml
    - /cf-deploy-kubernetes ./canary-with-metrics/rollout.yaml
    environment:
    - KUBECONTEXT=mydemoAkscluster@BizSpark Plus
    - KUBERNETES_NAMESPACE=canary
{% endraw %}
{% endhighlight %}

The pipeline is very simple because Argo Rollouts does all the heavy lifting behind the scenes.
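
As a side note, the Analysis can also be tuned for how much failure it tolerates. The optional `failureLimit` field sets how many failed measurements are allowed before the metric, and with it the canary, is considered failed. Here is a sketch based on the AnalysisTemplate above (see the Argo Rollouts analysis documentation for all available fields):

```yaml
  metrics:
  - name: success-rate
    interval: 2m
    count: 2
    failureLimit: 1 # tolerate at most one failed measurement before failing the analysis
    successCondition: result[0] >= 0.95
```
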

You can see the Analysis running with:

```
kubectl argo rollouts get rollout golang-sample-app-deployment --watch -n canary
```

{% include image.html
lightbox="true"
file="/images/guides/progressive-delivery/canary-watch-metrics.png"
url="/images/guides/progressive-delivery/canary-watch-metrics.png"
alt="Running the Analysis in the background"
caption="Running the Analysis in the background"
max-width="100%"
%}

For each deployment, you can also see the result of the Analysis along with the canary pods. The number next to the checkmark shows how many times the analysis will run (this is defined by the `count` property in the Analysis file). See the [Canary specification](https://argoproj.github.io/argo-rollouts/features/canary/){:target="\_blank"} for more parameters.

## Monitoring the Argo Rollouts controller

Regardless of whether you use metric evaluation for your own applications, Argo Rollouts itself exposes Prometheus metrics
for its internal functionality. You can ingest those metrics like any other Prometheus application
and create your own dashboards if you want insights into what the controller is doing.

You can find an example dashboard at [https://github.com/argoproj/argo-rollouts/blob/master/examples/dashboard.json](https://github.com/argoproj/argo-rollouts/blob/master/examples/dashboard.json){:target="\_blank"} that can be used as a starting point.

{% include image.html
lightbox="true"
file="/images/guides/progressive-delivery/monitor-rollout.png"
url="/images/guides/progressive-delivery/monitor-rollout.png"
alt="Integrated metrics from the Argo Rollouts controller"
caption="Integrated metrics from the Argo Rollouts controller"
max-width="80%"
%}

For more details, see the [metrics documentation page](https://argoproj.github.io/argo-rollouts/features/controller-metrics/){:target="\_blank"}.

## Using Argo Rollouts with GitOps

For simplicity, this page covered progressive delivery using Argo Rollouts on its own. Argo Rollouts integrates seamlessly with Argo CD, bringing together GitOps and progressive delivery.

If you use Argo CD and Argo Rollouts together, you also have access to the Codefresh GitOps dashboard to manage your deployments:

{% include image.html
lightbox="true"
file="/images/guides/gitops/gitops-dashboard.png"
url="/images/guides/gitops/gitops-dashboard.png"
alt="The Codefresh GitOps dashboard"
caption="The Codefresh GitOps dashboard"
max-width="60%"
%}

See our [GitOps page]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/) for more details.

## Related articles
[Deploying to predefined environments]({{site.baseurl}}/docs/ci-cd-guides/environment-deployments/)
[GitOps Deployments]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/)
[Pipelines for microservices]({{site.baseurl}}/docs/ci-cd-guides/microservices/)

diff --git a/_docs/ci-cd-guides/pull-request-branches.md b/_docs/ci-cd-guides/pull-request-branches.md
deleted file mode 100644
index 1414d7337..000000000
--- a/_docs/ci-cd-guides/pull-request-branches.md
+++ /dev/null
@@ -1,354 +0,0 @@
---
title: "Pull requests and branches"
description: "Handle builds for pull requests or other branches"
group: ci-cd-guides
toc: true
---

Codefresh has native support for working with different branches and building pull requests. In particular, it has a very rich trigger model that allows you to handle specific events (such as opening a pull request or adding a comment).

The possible actions can be seen in the trigger dialog of your pipeline:

{% include image.html
lightbox="true"
file="/images/pipeline/triggers/add-git-trigger.png"
url="/images/pipeline/triggers/add-git-trigger.png"
alt="Adding GIT Trigger"
max-width="50%"
%}

Note, however, that Codefresh capabilities always depend on what your Git provider offers. If your Git provider does not support webhooks for specific events, these will not be available in the trigger dialog.

## Building branches automatically

By default, Codefresh connects to your Git provider and does the following:

1. Auto-builds every new commit that happens in master or any other branch
2. Auto-builds every new branch when it is created

You can change the default behavior so that it matches your own workflow using extra [Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/).

You don't have to do anything special to set up this communication between Codefresh and your Git provider. It was set up automatically when you connected your Codefresh account to your Git provider.

Codefresh also creates a default Git trigger the first time you create a project.

{% include
image.html
lightbox="true"
file="/images/pipeline/triggers/default-git-trigger.png"
url="/images/pipeline/triggers/default-git-trigger.png"
alt="Default GIT trigger"
caption="Default GIT trigger"
max-width="50%"
%}

If you create a new branch in your repository, Codefresh automatically builds it and also stores the resulting Docker image.

```
git checkout -b another-branch
[..make changes...]
git commit -a -m "My changes"
git push -u origin another-branch
```

The build clearly shows its source branch:

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/auto-branch-build.png"
url="/images/guides/branches-pull-requests/auto-branch-build.png"
alt="Building automatically new branches"
caption="Building automatically new branches"
max-width="100%"
%}

When you commit to a pull request (PR), Codefresh auto-builds the PR, and you can also see the build status in the GitHub UI:

{% include
image.html
lightbox="true"
file="/images/getting-started/quick-start-test-pr/auto-build-pr.png"
url="/images/getting-started/quick-start-test-pr/auto-build-pr.png"
alt="Pull Request Status"
caption="Pull Request Status (click image to enlarge)"
max-width="50%"
%}

## Building specific branches manually

Sometimes you want to run an ad-hoc build on a specific branch without actually committing anything. You can do that in the [run dialog of a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines) by selecting a branch from the dropdown menu.

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/build-specific-branch.png"
url="/images/guides/branches-pull-requests/build-specific-branch.png"
alt="Building a specific branch"
caption="Building a specific branch"
max-width="50%"
%}

From the same dialog, you can also select a specific trigger to "emulate" for this branch if you have connected multiple triggers to the same pipeline.

## Restricting which branches to build

The auto-build nature of Codefresh for all branches is what you want most of the time. For larger projects, you might wish to restrict pipelines to run only on specific branches.

This is performed by defining [the branch field]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#pull-request-target-branch-and-branch) in the trigger dialog with a regular expression.

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/restrict-branch.png"
url="/images/guides/branches-pull-requests/restrict-branch.png"
alt="Restrict a pipeline to a single branch"
caption="Restrict a pipeline to a single branch"
max-width="50%"
%}

The trigger above is only activated for the `production` branch, so if a developer creates a new branch, this pipeline will not run for it. Remember also that this field is actually a regular expression, so you can restrict a pipeline to a specific naming pattern (i.e. a group of branch names).

Another popular filtering mechanism is to keep the auto-build nature of Codefresh, but enable/disable specific pipeline steps according to the branch being built. This is performed by using [step conditions]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/).
Here is an example:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
stages:
  - prepare
  - build
  - deploy
steps:
  main_clone:
    title: Cloning main repository...
    stage: prepare
    type: git-clone
    repo: 'codefresh-contrib/spring-boot-2-sample-app'
    revision: master
    git: github
  build_app_image:
    title: Building Docker Image
    type: build
    stage: build
    image_name: spring-boot-2-sample-app
    working_directory: ./
    tag: 'multistage'
    dockerfile: Dockerfile
  deploy_production:
    title: Deploying to production
    type: deploy
    stage: deploy
    kind: kubernetes
    cluster: 'my-prod-cluster'
    namespace: default
    service: my-prod-app
    candidate:
      image: '${{build_app_image}}'
      registry: 'dockerhub'
    when:
      branch:
        only:
          - master
  deploy_staging:
    title: Deploying to staging
    type: deploy
    stage: deploy
    kind: kubernetes
    cluster: 'my-staging-cluster'
    namespace: development
    service: my-staging-app
    candidate:
      image: '${{build_app_image}}'
      registry: 'dockerhub'
    when:
      branch:
        only:
          - /^JIRA-FEATURE-.*/i
{% endraw %}
{% endhighlight %}

This pipeline executes for **ALL** branches and pull requests, but:

1. If the branch is `master`, it deploys the Docker image to the production cluster in the `default` namespace
1. If the branch starts with `JIRA-FEATURE-` (e.g. JIRA-FEATURE-1234, JIRA-FEATURE-testing, JIRA-FEATURE-fixbug), it deploys to a staging cluster in the `development` namespace
1. In all other cases of branches or pull requests, it just builds the Docker image without deploying it anywhere

You can see that if a developer creates an unrelated branch (that doesn't match the expected name), no deployment takes place:

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/branch-step-condition.png"
url="/images/guides/branches-pull-requests/branch-step-condition.png"
alt="Restrict pipeline steps according to branch"
caption="Restrict pipeline steps according to branch"
max-width="80%"
%}

This is a more granular way to control how your branch affects your pipeline.

>We recommend you follow the first method of having multiple simple pipelines with different branch expressions in the trigger dialog, instead of having a single complex pipeline using step conditions. Remember that in Codefresh you can create as many pipelines as you want for a single project instead of being limited to one pipeline per project.
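
For illustration, here is how the production half of the example above could look as its own minimal pipeline (a sketch, under the assumption that the trigger's branch field is set to `/^master$/`; because the trigger itself only fires for `master`, no `when` conditions are needed):

{% highlight yaml %}
{% raw %}
version: '1.0'
stages:
  - prepare
  - build
  - deploy
steps:
  main_clone:
    title: Cloning main repository...
    stage: prepare
    type: git-clone
    repo: 'codefresh-contrib/spring-boot-2-sample-app'
    revision: master
    git: github
  build_app_image:
    title: Building Docker Image
    type: build
    stage: build
    image_name: spring-boot-2-sample-app
    working_directory: ./
    tag: 'multistage'
    dockerfile: Dockerfile
  deploy_production:
    title: Deploying to production
    type: deploy
    stage: deploy
    kind: kubernetes
    cluster: 'my-prod-cluster'
    namespace: default
    service: my-prod-app
    candidate:
      image: '${{build_app_image}}'
      registry: 'dockerhub'
{% endraw %}
{% endhighlight %}

A second, similar pipeline whose trigger branch field is set to `/^JIRA-FEATURE-.*/i` could then handle the staging deployments, keeping each pipeline trivially simple.
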

## Handling pull request events

The power of Codefresh becomes evident when you realize that you can have extra pipelines that respond to specific PR events. For example, you can have a specific pipeline that runs **only** when a PR is opened for the first time or when a PR is closed.

You can see all supported PR events in the trigger dialog.

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/choosing-pr-events.png"
url="/images/guides/branches-pull-requests/choosing-pr-events.png"
alt="Choosing PR events for a pipeline"
caption="Choosing PR events for a pipeline"
max-width="80%"
%}

>Remember that the events shown are those supported by your Git provider. Not all Git providers support all possible pull request events.

You can select multiple pull request events for a single pipeline, or have multiple pipelines that respond to individual pull request events. There is no right or wrong answer, as it mostly depends on how your team handles pull requests.

The most useful events are:

* Pull request open
* Pull request sync (when a commit happens to a PR)
* Pull request closed
* Comment added on a pull request

There is also the shortcut checkbox for *any PR event* if you don't care about which specific event happened.

## Trunk-based development

One of the most popular Git workflows is [Trunk-based development](https://trunkbaseddevelopment.com/){:target="\_blank"} with short-lived feature branches.

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/trunk-based-development.png"
url="/images/guides/branches-pull-requests/trunk-based-development.png"
alt="Trunk Based Development"
caption="Trunk Based Development"
max-width="100%"
%}

In this process, the master branch is always ready for production. The feature branches are created from master and can have several commits before being merged back to master.

This process can be easily recreated in Codefresh with two separate pipelines:

* The "main" pipeline that deploys master to the production environment
* The feature pipeline that checks each feature as it is developed (and optionally deploys it to a staging environment)

As an example, here is a minimal pipeline for the master branch:

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/production-pipeline.png"
url="/images/guides/branches-pull-requests/production-pipeline.png"
alt="Pipeline that deploys to production"
caption="Pipeline that deploys to production"
max-width="100%"
%}

The pipeline:

1. Checks out the source code
1. Builds a Docker image
1. Creates and stores a Helm chart
1. Deploys the chart to Kubernetes

The pipeline for feature branches is different:

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/feature-pipeline.png"
url="/images/guides/branches-pull-requests/feature-pipeline.png"
alt="Pipeline for feature branches"
caption="Pipeline for feature branches"
max-width="100%"
%}

For each feature branch, it:

1. Checks out the code
1. Runs linters on the source code
1. Builds the Docker image
1. Runs some unit tests to verify the Docker image (for example, with [service containers]({{site.baseurl}}/docs/pipelines/service-containers/))

To implement trunk-based development, we create two triggers for these pipelines. For the production pipeline, we simply make sure that the trigger is launched only when commits land on master.

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/trigger-for-production-pipeline.png"
url="/images/guides/branches-pull-requests/trigger-for-production-pipeline.png"
alt="Trigger for production pipeline"
caption="Trigger for production pipeline"
max-width="50%"
%}

For the feature branch pipeline, we check the events for:

* PR (pull request) Open
* PR Sync (when a commit happens on the PR)

For the [branch specifications]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#pull-request-target-branch-and-branch), we make sure that we look only for pull requests that are targeted **at** `master`.

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/trigger-for-features.png"
url="/images/guides/branches-pull-requests/trigger-for-features.png"
alt="Trigger for pull request pipeline"
caption="Trigger for pull request pipeline"
max-width="50%"
%}

With this configuration, the whole process is as follows:

1. A developer creates a new branch from master. Nothing happens at this point.
1. The developer opens a new PR for this branch. The feature pipeline runs (because of the PR open checkbox).
1. The developer makes one or more commits to the branch. The feature pipeline runs again for each commit (because of the PR sync checkbox).
1. The developer merges the branch back to master. The main pipeline runs and deploys to production.

You can fine-tune this workflow according to your needs. For example, you might also specify a naming pattern on the branches for the PR (e.g. feature-xxx) to further restrict which branches are considered ready for production.

> We didn't need to handle the PR close/merge events. As soon as a PR is merged back to master, the Git provider sends an event anyway that a commit has happened in master, which means that the main production pipeline takes care of releasing the contents of master.

## Git-flow

[Git Flow](https://nvie.com/posts/a-successful-git-branching-model/){:target="\_blank"} is another popular management process for Git branches. For brevity, we will not list all the details for all branch types, but it should be obvious that you can recreate all aspects of Git flow with Codefresh triggers.

For example, to run a pipeline only for pull requests from branches named `feature-XXX` that will be merged back to the `develop` branch, you can create a trigger like this:

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/git-flow-feature-trigger.png"
url="/images/guides/branches-pull-requests/git-flow-feature-trigger.png"
alt="Git flow feature branch trigger"
caption="Git flow feature branch trigger"
max-width="50%"
%}

To launch a pipeline that runs only when a commit happens on a release branch named `release-XXX`, you can create a trigger like this:

{% include image.html
lightbox="true"
file="/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png"
url="/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png"
alt="Git flow release branch trigger"
caption="Git flow release branch trigger"
max-width="50%"
%}

In a similar manner, you can create the triggers for all other branch types in Git flow.

## Create your own workflow

Trunk-based development and Git-flow are only some examples of what a Git workflow can look like. Your organization might follow a completely different process.
Using the basic building blocks of Codefresh triggers (branch field, PR checkboxes, and so on), you should be able to model your own workflow with your own pipelines.

## Related articles
[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/)
[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/)
[YAML examples]({{site.baseurl}}/docs/example-catalog/examples/)
[Preview environments]({{site.baseurl}}/docs/ci-cd-guides/preview-environments/)

diff --git a/_docs/ci-cd-guides/working-with-docker-registries.md b/_docs/ci-cd-guides/working-with-docker-registries.md
deleted file mode 100644
index 28ab8c91b..000000000
--- a/_docs/ci-cd-guides/working-with-docker-registries.md
+++ /dev/null
@@ -1,573 +0,0 @@
---
title: "Work with Docker Registries"
description: "Push, pull, and tag Docker images in Codefresh pipelines"
group: ci-cd-guides
redirect_from:
  - /docs/build-specific-revision-image/
  - /docs/image-management/build-specific-revision-image/
  - /docs/docker-registries/working-with-docker-registries/
toc: true
---

Codefresh contains first-class Docker registry support. This means that you don't need to manually write `docker login` and `docker pull/push` commands within pipelines. You use declarative YAML, and all credentials are configured once, in a central location.

## Viewing Docker images

To see all images from [all connected registries]({{site.baseurl}}/docs/integrations/docker-registry/docker-registries/):

* In the Codefresh UI, from the Artifacts section in the sidebar, select [**Images**](https://g.codefresh.io/images/){:target="\_blank"}.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/docker-registry-list.png"
  url="/images/guides/working-with-images/docker-registry-list.png"
  alt="Codefresh Registry Image List"
  caption="Codefresh Registry Image List"
  max-width="70%"
%}

Each image displays basic details such as the Git branch, the commit message and hash that created it, the creation date, and all tags.
* To view image metadata, click on the image. For details, see [Docker image metadata]({{site.baseurl}}/docs/pipelines/docker-image-metadata/).

**Filters for Docker images**
The top left of the Images page has several filters that allow you to search for a specific subset of Docker images.
Filters include:
* Tagged/untagged images
* Base image name
* Git branch
* Tag
* Pipeline volumes

Multiple filters work in an `AND` manner.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/docker-registry-filters.png"
  url="/images/guides/working-with-images/docker-registry-filters.png"
  alt="Codefresh Registry Image filters"
  caption="Codefresh Registry Image filters"
  max-width="40%"
%}

**Actions for Docker images**
On the right are the actions available for each Docker image.
You can:
* Launch a Docker image as a [test environment]({{site.baseurl}}/docs/getting-started/on-demand-environments/)
* Promote a Docker image (explained in the following sections)
* Pull the image locally on your workstation with different commands
* Re-run the pipeline that created the image

## Pulling Docker images

Pulling Docker images in Codefresh is completely automatic. You only need to mention a Docker image by name, and Codefresh automatically pulls it for you and uses it in a pipeline.

### Pulling public images

To pull a public image from Docker Hub or other public registries:

* Specify the name of the image and tag that you want to use.

For example:

```yaml
CollectAllMyDeps:
  title: Install dependencies
  image: python:3.6.4-alpine3.6
  commands:
    - pip install .
```

This [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) pulls the image `python:3.6.4-alpine3.6` from Docker Hub, and then runs the command `pip install .` inside it.
You can see the images that get pulled in the [Codefresh pipeline log]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/).

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/pull-public-image.png"
  url="/images/guides/working-with-images/pull-public-image.png"
  alt="Pulling a public image"
  caption="Pulling a public image"
  max-width="70%"
%}

The image is also automatically cached in the [image cache]({{site.baseurl}}/docs/pipelines/pipeline-caching/#distributed-docker-image-caching).

Codefresh also automatically pulls any images mentioned in Dockerfiles (i.e. in the `FROM` directive), as well as [service containers]({{site.baseurl}}/docs/pipelines/service-containers/).

### Pulling private images

To pull a private image from one of your connected registries, in addition to specifying the image by name and tag, you must also prepend the appropriate prefix of the registry domain. The registry domain prefix is required for Codefresh to understand that it is a private image.

For example, in the case of ACR (Azure Container Registry):

```
registry-name.azurecr.io/my-docker-repo/my-image-name:tag
```

To get the full name of a Docker image:

1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Images**](https://g.codefresh.io/images/){:target="\_blank"}.
1. Click on the image and copy the image name from the Activity column, **Image promoted** label.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/image-dashboard-tag.png"
  url="/images/guides/working-with-images/image-dashboard-tag.png"
  alt="Looking at tag of a private image"
  caption="Looking at tag of a private image"
  max-width="65%"
%}

The exact format of the image name depends on the type of registry you use. Codefresh uses the domain prefix of each image to understand which integration to use, and then takes care of all `docker login` and `docker pull` commands on its own behind the scenes.

For example, if you have connected [Azure]({{site.baseurl}}/docs/integrations/docker-registries/azure-docker-registry/){:target="\_blank"}, [AWS]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/){:target="\_blank"}, and [Google]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/){:target="\_blank"} registries, you can pull an image from each of them in a single pipeline like this:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
steps:
  my_go_unit_tests:
    title: Running Go Unit tests
    image: 'us.gcr.io/project-k8s-sample-123454/my-golang-app:prod'
    commands:
      - go test -v
  my_mvn_unit_tests:
    title: Running Maven Unit tests
    image: '123456789012.dkr.ecr.us-west-2.amazonaws.com/my-java-app:latest'
    commands:
      - mvn test
  my_python_unit_tests:
    title: Running Python Unit tests
    image: 'my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:master'
    commands:
      - python setup.py test
{% endraw %}
{% endhighlight %}

Codefresh automatically logs in to each registry using the credentials you have defined centrally, and pulls all the images. The same happens with Dockerfiles that mention any valid Docker image in their `FROM` directive.

### Pulling images created in the same pipeline

Codefresh allows you to create a Docker image on demand and use it in the same pipeline that created it. In several scenarios (such as [unit tests]({{site.baseurl}}/docs/testing/unit-tests/)), it is very common to use a Docker image right after it is built.

In that case, as a shortcut, Codefresh allows you to simply [specify the name]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables) of the respective [build step]({{site.baseurl}}/docs/pipelines/steps/build/).

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
steps:
  main_clone:
    title: Cloning main repository...
    type: git-clone
    repo: 'codefresh-contrib/python-flask-sample-app'
    revision: 'master'
    git: github
  MyAppDockerImage:
    title: Building Docker Image
    type: build
    image_name: my-app-image
    working_directory: ./
    tag: 'master'
    dockerfile: Dockerfile
  MyUnitTests:
    title: Running Unit tests
    image: '${{MyAppDockerImage}}'
    commands:
      - python setup.py test
{% endraw %}
{% endhighlight %}

In the above pipeline, Codefresh:

1. Checks out source code through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/).
1. Builds a Docker image, named `my-app-image:master`. Notice the lack of `docker push` commands.
1. In the next step, automatically uses that image and runs `python setup.py test` inside it. Again, notice the lack of `docker pull` commands.

The important line here is the following:

{% highlight yaml %}
{% raw %}
    image: ${{MyAppDockerImage}}
{% endraw %}
{% endhighlight %}

This tells Codefresh: "in this step, use the Docker image that was built in the step named `MyAppDockerImage`".

You can see the automatic pull inside the Codefresh logs.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/pull-private-image.png"
  url="/images/guides/working-with-images/pull-private-image.png"
  alt="Auto-Pulling a private image"
  caption="Auto-Pulling a private image"
  max-width="70%"
%}

The image is still pushed to your default Docker registry. If you don't want this behavior, add the `disable_push` property to the build step.
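
For example, here is the same build step with the push disabled (a sketch; `disable_push` keeps the image available to the pipeline without pushing it anywhere):

{% highlight yaml %}
{% raw %}
  MyAppDockerImage:
    title: Building Docker Image
    type: build
    image_name: my-app-image
    working_directory: ./
    tag: 'master'
    dockerfile: Dockerfile
    disable_push: true # build the image, but do not push it to the default registry
{% endraw %}
{% endhighlight %}
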

## Pushing Docker images

Pushing to your default Docker registry is completely automatic. All successful [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) automatically push to the default Docker registry of your Codefresh account without any extra configuration.

To push to another registry, you only need to know how this registry is [connected to Codefresh]({{site.baseurl}}/docs/integrations/docker-registries/), and more specifically, the unique name of the integration. You can see the name in your [Docker Registry integrations](https://g.codefresh.io/account-admin/account-conf/integration/registryNew), or ask your Codefresh administrator.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/linked-docker-registries.png"
  url="/images/guides/working-with-images/linked-docker-registries.png"
  alt="Name of linked Docker Registries"
  caption="Name of linked Docker Registries"
  max-width="50%"
%}

Once you know the registry identifier, you can use it in any [push step]({{site.baseurl}}/docs/pipelines/steps/push/) or [build step]({{site.baseurl}}/docs/pipelines/steps/build/) by specifying the registry with that name:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
  build_image:
    title: Building my app image
    type: build
    image_name: my-app-image
    dockerfile: Dockerfile
    tag: 'master'
  push_to_registry:
    title: Pushing to Docker Registry
    type: push
    #Name of the build step that is building the image
    candidate: '${{build_image}}'
    tag: '1.2.3'
    # Unique registry name
    registry: azure-demo
{% endraw %}
{% endhighlight %}

Notice that:

 * The `candidate` field of the push step mentions the name of the build step (`build_image`) whose image will be pushed.
 * The registry is identified only by name (`azure-demo` in the example). The domain and credentials are not part of the pipeline, as they are already known to Codefresh through the Docker registry integration.

You can also override the name of the image with any custom name. This way, the push step can choose any image name, regardless of what was used in the build step.

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
  build_image:
    title: Building my app image
    type: build
    image_name: my-app-image
    dockerfile: Dockerfile
    tag: 'master'
  push_to_registry:
    title: Pushing to Docker Registry
    type: push
    #Name of the build step that is building the image
    candidate: '${{build_image}}'
    tag: '1.2.3'
    # Unique registry name
    registry: azure-demo
    image_name: my-company/web-app
{% endraw %}
{% endhighlight %}

Here the build step creates an image named `my-app-image:master`, but the push step actually pushes it as `my-company/web-app:1.2.3`.

For more examples, such as using multiple tags or pushing in parallel, see the [push examples]({{site.baseurl}}/docs/pipelines/steps/push/#examples).

### Pushing images with an optional prefix

There are some registry providers that require a specific prefix for all your Docker images. This is often the name of an organization, account, or other top-level construct defined by the registry. You can include this prefix directly in the image name:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
  build:
    title: "Building Docker image"
    type: "build"
    image_name: "acme-company/trivial-go-web"
    working_directory: "${{clone}}"
    tag: "latest"
    dockerfile: "Dockerfile.multistage"
    stage: "build"
    registry: azure
{% endraw %}
{% endhighlight %}

The example above pushes the final Docker image as `kostisazureregistry.azurecr.io/acme-company/trivial-go-web:latest`.

However, you can also set up the prefix globally, once, in the [Docker registry integrations]({{site.baseurl}}/docs/integrations/docker-registries/). This way you can simplify your pipelines and have them mention only the final image name.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/registry-prefix.png"
  url="/images/guides/working-with-images/registry-prefix.png"
  alt="Global registry prefix"
  caption="Global registry prefix"
  max-width="70%"
%}

Using the repository prefix in the example above automatically prefixes all your Docker images with `acme-company`.

Now you can simplify your build/push step as below:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
  build:
    title: "Building Docker image"
    type: "build"
    image_name: "trivial-go-web"
    working_directory: "${{clone}}"
    tag: "latest"
    dockerfile: "Dockerfile.multistage"
    stage: "build"
    registry: azure
{% endraw %}
{% endhighlight %}

The final Docker image is still `kostisazureregistry.azurecr.io/acme-company/trivial-go-web:latest`.

## Working with multiple registries with the same domain

With Codefresh, you can [connect multiple registries on a global level]({{site.baseurl}}/docs/integrations/docker-registries/). This allows you to create pipelines that push/pull images to different registries without having to deal with Docker credentials within the pipeline itself.

However, there are cases where you have multiple registries with the same domain. For example, you might have two Docker Hub accounts connected to Codefresh (so both of them can resolve images for the `docker.io` domain).

This means that when you reference an image by domain name, in a freestyle step for example, Codefresh might not know which Docker registry account to use for the pull action.

> This is not a Codefresh limitation, but a Docker one. Even with vanilla Docker, you cannot log in to multiple registries at the same time if they share the same domain.

To solve this problem, Codefresh automatically detects connected registries that have the same domain and allows you to designate the primary one. The primary registry is used for image resolution when pulling Docker images.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/primary-dockerhub.png"
  url="/images/guides/working-with-images/primary-dockerhub.png"
  alt="Choosing a Docker registry as the primary one if they have the same domain"
  caption="Choosing a Docker registry as the primary one if they have the same domain"
  max-width="90%"
%}

In the example above, even though two Docker Hub integrations are connected to Codefresh, only the primary one is used to pull images from the `docker.io` domain. You can still use the second one in push/build steps using the `registry` property.

You can override the default behavior in each pipeline by adding the optional `registry_context` property, which instructs Codefresh to use a specific registry for pulling Docker images (if you have more than one for the same domain).

You can use the `registry_context` property in [build]({{site.baseurl}}/docs/pipelines/steps/build/), [push]({{site.baseurl}}/docs/pipelines/steps/push/), [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/), and [composition]({{site.baseurl}}/docs/pipelines/steps/composition/) steps.

The `registry_context` property takes as its value the name of an external connected registry. Build and composition steps accept an array of values as `registry_contexts`. In all cases, by using this optional property you instruct Codefresh to use a specific registry for pulling images.

> The optional `registry_context` and `registry_contexts` properties only affect the **pulling** of Docker images. The registry used for *pushing* images is still declared explicitly in build and push pipeline steps.

The syntax for the freestyle step is the following:

{% highlight yaml %}
{% raw %}
  test:
    title: "Running test"
    type: "freestyle"
    image: "gcr.io/my-google-project/my-image:latest"
    registry_context: my-second-gcr-registry # define what registry will be used for pulling the image
    working_directory: "${{clone}}"
    commands:
      - "ls"
{% endraw %}
{% endhighlight %}

The syntax for the build step is the following:

{% highlight yaml %}
{% raw %}
  build:
    title: "Building Docker image"
    type: "build"
    image_name: "trivial-go-web"
    working_directory: "${{clone}}"
    tag: "latest"
    dockerfile: "Dockerfile.multistage"
    stage: "build"
    registry_contexts: # define what registries will be used for pulling images
      - second-dockerhub
      - production-azure
    registry: azure
{% endraw %}
{% endhighlight %}

The syntax for the push step is the following:

{% highlight yaml %}
{% raw %}
  push:
    title: "Pushing 1st Docker image"
    type: push
    image_name: "kostiscodefresh/trivial-go-web"
    tag: "latest"
    stage: "push"
    registry: dockerhub # Docker registry to push to
    registry_context: second-dockerhub # Docker registry to pull images from
    candidate: ${{build}}
{% endraw %}
{% endhighlight %}

The syntax for the composition step is the following:

{% highlight yaml %}
{% raw %}
  my-composition:
    title: Running Composition
    type: composition
    registry_contexts:
      - first-gcr
      - second-gcr
    arguments:
      composition:
        version: '2'
        services:
          db:
            image: postgres
      composition_candidates:
        test_service:
          image: 'alpine:3.9'
          command: printenv
          working_dir: /app
          environment:
            - key=value
{% endraw %}
{% endhighlight %}

Let's look at an example. We assume that we have two GCR integrations:

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/two-gcr-integrations.png"
  url="/images/guides/working-with-images/two-gcr-integrations.png"
  alt="Two GCR integrations"
  caption="Two GCR integrations"
  max-width="90%"
%}

The first integration is the "production" one, and the second is the "staging" one. The production integration is designated as primary. This means that, by default, even though both integrations work for the `gcr.io` domain, only the primary one is used in Codefresh pipelines for pulling images.

Let's say, however, that you want to build a Docker image whose `FROM` statement references an image that exists in the staging registry, while the resulting image should be pushed to the production registry.
You can use the `registry_context` property as shown below:

{% highlight yaml %}
{% raw %}
  build:
    title: "Building Docker image"
    type: "build"
    image_name: "gcr.io/production-project/my-image"
    working_directory: "${{clone}}"
    tag: "latest"
    dockerfile: "Dockerfile"
    stage: "build"
    registry: production-gcr
    registry_contexts: # define what registries will be used for pulling images
      - staging-gcr
{% endraw %}
{% endhighlight %}

Behind the scenes, Codefresh will:

1. First log in to the "staging" Docker registry using the "staging" credentials.
1. Build the Docker image, resolving the `FROM` statements with "staging" images, pulling them as needed using the staging credentials.
1. Tag the Docker image.
1. Log in to the "production" Docker registry.
1. Push the final Docker image to the "production" registry.

If your primary Docker registry is also the one that is used in your pipelines, you don't need the `registry_context` property at all, as this is the default behavior. When searching for an image to pull, Codefresh looks at all your Docker registries (if they manage only a single domain), plus your "primary" Docker registries in case you have multiple Docker registries for the same domain.

## Promoting Docker images

Apart from building and pushing a brand new Docker image, you can also "promote" a Docker image by copying it from one registry to another.
You can perform this action either from the Codefresh UI or automatically from pipelines.

### Promoting images via the Codefresh UI

You can "promote" any image of your choosing and push it to an external registry you have integrated into Codefresh (such as Azure, Google, Bintray, etc.).

1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Images**](https://g.codefresh.io/images/){:target="\_blank"}.
1. To promote an image, in the row with the image, click the **Promote Image** icon on the right.

{%
  include image.html
  lightbox="true"
  file="/images/guides/working-with-images/docker-image-promotion.png"
  url="/images/guides/working-with-images/docker-image-promotion.png"
  alt="Promoting a Docker image"
  caption="Promoting a Docker image"
  max-width="50%"
%}

1. From the list of connected registries, select the target registry, and define the tag that you want to push.
1. To "copy" this image from the existing registry to the target registry, click **Promote**.

### Promoting images in pipelines

You can also copy images from one registry to another within a pipeline.
This is accomplished by specifying an existing image in the `candidate` field of the push step.

For example:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
  promote_to_production_registry:
    title: Promoting to Azure registry
    type: push
    candidate: us.gcr.io/project-k8s-sample-123454/my-golang-app
    tag: '1.2.3'
    # Unique registry name
    registry: azure-demo
{% endraw %}
{% endhighlight %}

In the example above, we promote an image from [GCR]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/) to [ACR]({{site.baseurl}}/docs/integrations/docker-registries/azure-docker-registry/), which is already set up as `azure-demo`.

You can even "promote" Docker images within the same registry by simply creating new tags.
For example:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
  promote_to_production:
    title: Marking image with prod tag
    type: push
    candidate: my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:1.2.3
    tag: 'production'
    # Unique registry name
    registry: azure-demo
{% endraw %}
{% endhighlight %}

In the example above, the image `my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:1.2.3` is re-tagged as `my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:production`. A very common pattern is to mark images with a special tag such as `production` **after** the image has been deployed successfully.

## Related articles
[Push pipeline step]({{site.baseurl}}/docs/pipelines/steps/push/)
[External Docker registries]({{site.baseurl}}/docs/integrations/docker-registries/)
[Accessing a Docker registry from your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/access-docker-registry-from-kubernetes/)
[Build and push an image example]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/)

diff --git a/_docs/dashboards/dora-metrics.md b/_docs/dashboards/dora-metrics.md
deleted file mode 100644
index 6230c340d..000000000
--- a/_docs/dashboards/dora-metrics.md
+++ /dev/null
@@ -1,92 +0,0 @@
---
title: "DORA metrics"
description: "Get insights into your deployments"
group: dashboards
toc: true
---

DevOps is a collaboration paradigm that is sometimes dismissed as too abstract or too generic. In an effort to quantify the benefits of adopting DevOps, [DORA Research](https://www.devops-research.com/research.html#capabilities){:target="\_blank"} (acquired by Google in 2018) introduced four key metrics that define specific goals for improving the software lifecycle in companies interested in adopting DevOps.

DORA measures these metrics:

* Deployment Frequency: How often an organization successfully releases to production
* Lead Time for Changes: The length of time for a commit to be deployed into production
* Change Failure Rate: The percentage of deployments causing a failure in production
* Time to Restore Service: The length of time for an organization to recover from a failure in production

[Read more on DORA](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance){:target="\_blank"}.

## DORA metrics in Codefresh

Monitoring DORA metrics can help you identify delivery issues in your organization by detecting bottlenecks among teams, and optimize your workflows at the technical or organizational level.
Codefresh offers support for DORA metrics out of the box.

* In the Codefresh UI, go to [DORA metrics](https://g.codefresh.io/2.0/dora-dashboard/dora){:target="\_blank"}.

{% include
image.html
lightbox="true"
file="/images/reporting/dora-metrics.png"
url="/images/reporting/dora-metrics.png"
alt="DORA metrics report"
caption="DORA metrics report"
max-width="100%"
%}

## Filters

Use filters to define the exact subset of applications you are interested in. All filters support auto-complete and multiselect.
Multiple options within the same filter type have an OR relationship; multiple filter types, when defined, have an AND relationship.

* Runtimes: Show metrics for applications from selected runtimes
* Clusters: Show metrics for applications deployed to selected clusters
* Applications: Show metrics for selected applications.
* Time: Show metrics for applications within a specific time period

> When no filters are defined, all metrics are shown for the last 90 days.

## Metrics for favorite applications
If you have [starred applications as favorites]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/#applications-dashboard-information) in the Applications dashboard, clicking the star icon in DORA metrics displays DORA metrics only for those applications.

## Metric totals
As the title indicates, the Totals bar shows the total numbers, based on the filters defined, or for the last 90 days if there are no filters:

* Deployments
* Rollbacks
* Commits/Pull Requests
* Failure Rate: The number of failed deployments divided by the total number of deployments

## Metric graphs
The metric graphs are key to performance insights with DORA metrics. The metrics are again based on the filters defined, or on the last 90 days if there are no filters.

In addition, you can select the granularity for each graph:

* Daily
* Weekly
* Monthly

>Tip:
  Remember that the graphs for the DORA metrics reflect metrics of application deployments, not workflows.

**Deployment Frequency**
  The frequency at which applications are deployed to production, including both successful (Healthy) and failed (Degraded) deployments. A deployment is considered an Argo CD sync where there was a change in the application source code that resulted in a new deployment of the application to production.
  The X-axis charts the time based on the granularity selected, and the Y-axis charts the number of deployments. The number shown on the top right is the average deployment frequency based on granularity.

**Lead Time for Changes**
  The average number of days from the first commit for a PR (pull request) until the deployment date for the same PR. The key term here is _deployment_. Lead Time for Changes considers only those changes to workflows that result in a deployment. Making a change to a repo that does not result in a deployment is not included when calculating Lead Time for Changes.
  The X-axis charts the time based on the granularity selected, and the Y-axis charts the time in minutes until the deployment. The number shown on the top right is the average number of days for a commit to reach production.

**Change Failure Rate**
  The failure or rollback rate, in percent, for applications whose health status changed to Degraded on deployment. The key term here is _on deployment_. For example, bumping an image tag with one that does not exist results in the application being Degraded on deployment, and designated as failed.
  The Change Failure Rate is derived by dividing the number of Degraded (failed/rollback) deployments by the total number of deployments.
  The X-axis charts the time based on the granularity selected, and the Y-axis charts the failure rate. The number shown on the top right is the average failure rate based on granularity, and therefore may not be equal to the Total Failure Rate.

**Time to Restore Service**
  The average number of hours taken for the status of Degraded deployments to return to Healthy. Similar to the Change Failure Rate, Time to Restore Service includes only deployments that became Degraded. It is derived by dividing the total number of hours for all Degraded deployments to return to Healthy by the total number of Degraded deployments.
  The X-axis charts the time based on the granularity, and the Y-axis charts the time in hours.
The number shown on the top right is the average number of hours between the previous deployment and the rollback for the same application.

## Related articles
[Global analytics dashboard]({{site.baseurl}}/docs/dashboards/home-dashboard)
[Monitoring applications]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/)

diff --git a/_docs/dashboards/home-dashboard.md b/_docs/dashboards/home-dashboard.md
deleted file mode 100644
index be0e4e505..000000000
--- a/_docs/dashboards/home-dashboard.md
+++ /dev/null
@@ -1,142 +0,0 @@
---
title: "Global analytics dashboard"
description: ""
group: dashboards
toc: true
---

Get a global picture of runtimes, managed clusters, deployments, and pipelines in the Home dashboard. The Home dashboard is displayed when you log in to Codefresh, providing system-wide visualization in real time.

Global filters allow you to narrow the scope of the data, and drill down into specific entities for more details.

{% include
image.html
lightbox="true"
file="/images/reporting/home-dashboard.png"
url="/images/reporting/home-dashboard.png"
alt="Home dashboard: Global enterprise analytics"
caption="Home dashboard: Global enterprise analytics"
max-width="70%"
%}

### Global filters
Filter the view in the Home dashboard by runtimes and date range.

{% include
image.html
lightbox="true"
file="/images/reporting/global-filters.png"
url="/images/reporting/global-filters.png"
alt="Home dashboard: Global filters"
caption="Home dashboard: Global filters"
max-width="60%"
%}

### Runtimes and Managed Clusters

Identify the health of the runtimes and managed clusters in your enterprise.
Health status is displayed for both hosted (if you have Hosted GitOps) and hybrid runtimes.

Managed clusters are external clusters registered to runtimes, to which you deploy applications and GitOps-managed resources.

{% include
image.html
lightbox="true"
file="/images/reporting/runtimes-clusters-widgets.png"
url="/images/reporting/runtimes-clusters-widgets.png"
alt="Runtimes and Managed Clusters in the Home dashboard"
caption="Runtimes and Managed Clusters in the Home dashboard"
max-width="80%"
%}

{: .table .table-bordered .table-hover}
| Item | Description |
| ------------------------| ---------------- |
|**Runtimes** | Identify failed runtimes.|
|**Managed Clusters** |{::nomarkdown} {:/}|

### Deployments

Identify trends for deployments.

{% include
image.html
lightbox="true"
file="/images/reporting/deployments-widget.png"
url="/images/reporting/deployments-widget.png"
alt="Deployments in the Home dashboard"
caption="Deployments in the Home dashboard"
max-width="70%"
%}

{: .table .table-bordered .table-hover}
| Item | Description |
| ------------------------| ---------------- |
|**Daily/Weekly/Monthly** | Granularity for deployment views that affects the average number of deployments and the comparison to the reference period.|
|**Number and Comparison Average** | The number on the top right is the number of successful/failed/rollback deployments for the selected granularity. The percentage is the comparison to the reference period, also for the selected granularity. |
|**Successful** | The number of successful deployments per day, week, or month according to the selected granularity. |
|**Failed Deployments** | The number of failed deployments per day, week, or month according to the selected granularity. |
|**Rollbacks** | The number of rollbacks per day, week, or month according to the selected granularity. |

### Applications

Displays up to five of the most active applications and their current deployment status.

{% include
image.html
lightbox="true"
file="/images/reporting/applications-widget.png"
url="/images/reporting/applications-widget.png"
alt="Applications in the Home dashboard"
caption="Applications in the Home dashboard"
max-width="70%"
%}

{: .table .table-bordered .table-hover}

| Item | Description |
| ------------------------| ---------------- |
|**Filter** | Filter applications by the cluster on which they are deployed. |
|**View** | Click to go to the Applications dashboard. |
|**Application Name** | The name of the application, and the names of the runtime and cluster on which it is deployed. Click the name to drill down into the application in the Applications dashboard. |
|**Health status** | The current health status of the application.{::nomarkdown} To see the breakdown by health status, mouse over the chart. The number at the end of the bar is the total number of deployments for the application, with the overall decrease or increase compared to the reference period. {:/} |

### Delivery Pipelines

> Delivery Pipeline data is shown for hybrid environments.

Displays all active pipelines for the selected date range, providing insights into pipeline trends. Active pipelines are those with at least one active or completed workflow.
Analytics are derived by comparing the selected date range to the corresponding reference period. If your date range is the last seven days, the reference period is the seven days that precede the date range.

{% include
image.html
lightbox="true"
file="/images/reporting/delivery-pipeline-widget.png"
url="/images/reporting/delivery-pipeline-widget.png"
alt="Delivery Pipelines in the Home dashboard"
caption="Delivery Pipelines in the Home dashboard"
max-width="80%"
%}

{: .table .table-bordered .table-hover}

| Item | Description |
| ------------------------| ---------------- |
|**Pipelines** | The number prefixed to the pipeline name indicates the change in position of the pipeline compared to the reference period. To drill down into a specific pipeline, click the pipeline.|
|**Filter** | The filters available to focus on the pipelines of interest:{::nomarkdown}{:/} |
|**View** | Click to go to the Delivery Pipelines dashboard. |
|**KPI Averages** | KPI averages for: {::nomarkdown} To see detailed day-to-day values, select a line chart.{:/}|
|**Most Active Delivery Pipelines** | Up to ten pipelines with the highest number of executions. The same KPIs are displayed, and compared to those in the reference period. |
|**Longest Delivery Pipelines** | Up to ten pipelines with the longest duration. The same KPIs are displayed, and compared to those in the reference period. |

### Related articles
[DORA metrics]({{site.baseurl}}/docs/dashboards/dora-metrics/)
[Monitoring applications]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/)
[Images in Codefresh]({{site.baseurl}}/docs/deployments/gitops/images/)

diff --git a/_docs/deployments/gitops/applications-dashboard.md b/_docs/deployments/gitops/applications-dashboard.md
deleted file mode 100644
index 51907f811..000000000
--- a/_docs/deployments/gitops/applications-dashboard.md
+++ /dev/null
@@ -1,788 +0,0 @@
---
title: "Monitoring GitOps applications"
description: ""
group: deployments
sub_group: gitops
toc: true
---

Monitor applications across clusters, and the deployments, resources, and services for an application, in the Applications dashboard. As a one-stop shop for Argo Rollouts and Argo CD, the Applications dashboard in Codefresh delivers on the challenge of keeping track of your applications and their deployments, whatever the frequency and scale, across all clusters in your enterprise. A wide range of filters, progressive delivery views, and enriched CI and CD information provide full traceability and visibility into your deployments.

Select the view format for the Applications dashboard, either [List or Card view](#select-view-mode-for-the-applications-dashboard). The default view displays all applications deployed within the last 30 days. Customize the scope through filters to display the [information](#applications-dashboard-information) you need.
- -{% include -image.html -lightbox="true" -file="/images/applications/app-dashboard-main-view.png" -url="/images/applications/app-dashboard-main-view.png" -alt="Applications Dashboard: List view" -caption="Applications Dashboard: List view" -max-width="60%" -%} - - -Monitor the current [health and sync status of applications](#identify-applications-with-warningserrors), and then select an application to drill down into its resources, deployments, and services: -* [View deployment and configuration info for selected application](#view-deployment-and-configuration-info-for-selected-application) -* [Monitor health and sync statuses for selected application](#monitor-health-and-sync-statuses-for-selected-application) -* [Monitor resources for selected application](#monitor-resources-for-selected-application) -* [Monitor deployments for selected application](#monitor-deployments-for-selected-application) -* [Monitor services for selected application](#monitor-services-for-selected-application) - ->For information on creating and managing applications and application resources, see [Creating applications]({{site.baseurl}}/docs/deployments/gitops/create-application/) and [Managing applications]({{site.baseurl}}/docs/deployments/gitops/manage-application/). - -## Select view mode for the Applications dashboard -View deployed applications in either List (the default) or Card views. Both views are sorted by the most recent deployments. - -1. In the Codefresh UI, go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard/list){:target="\_blank"}. -1. Select **List** or **Cards**. - -### Applications List view - -Here is an example of the Applications dashboard in List view mode. - -{% include -image.html -lightbox="true" -file="/images/applications/app-dashboard-main-view.png" -url="/images/applications/app-dashboard-main-view.png" -alt="Applications Dashboard: List view" -caption="Applications Dashboard: List view" -max-width="60%" -%} - -### Applications Card view -Here is an example of the Applications dashboard in Card view mode. The Card view provides a scannable view of application data and the actions to manage applications. - - {% include -image.html -lightbox="true" -file="/images/applications/app-dashboard-card-view.png" -url="/images/applications/app-dashboard-card-view.png" -alt="Applications Dashboard: Card view" -caption="Applications Dashboard: Card view" -max-width="60%" -%} - -## Applications dashboard information -Here's a description of the information and actions in the Applications dashboard. - -{: .table .table-bordered .table-hover} -| Item | Description | -| -------------- | -------------- | -|Application filters | Filter by a range of attributes to customize the information in the dashboard to bring you what you need. {::nomarkdown} {:/}| -|{::nomarkdown}{:/}| Star applications as favorites and view only the starred applications.{::nomarkdown}
Select the star icon to star the application as a favorite.

To filter by favorite applications, on the filters bar, select the star icon.
{:/} TIP: If you star applications as favorites in the Applications dashboard, you can filter by the same applications in the [DORA metrics dashboard]({{site.baseurl}}/docs/dashboards/dora-metrics/#metrics-for-favorite-applications). |
|Application actions| Options to monitor/manage applications through the application's context menu. {::nomarkdown}